+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release
+ [[ openshift-3.9-crio-release =~ openshift-.* ]]
+ [[ openshift-3.9-crio-release =~ .*-crio ]]
+ export KUBEVIRT_PROVIDER=os-3.9.0-crio
+ KUBEVIRT_PROVIDER=os-3.9.0-crio
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading .......
Downloading .......
Downloading .......
2018/06/05 20:10:34 Waiting for host: 192.168.66.102:22
2018/06/05 20:10:37 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:10:45 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:10:53 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:10:58 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: connection refused. Sleeping 5s
2018/06/05 20:11:03 Connected to tcp://192.168.66.102:22
+ systemctl stop origin-master-api
+ systemctl disable origin-master-api
Removed symlink /etc/systemd/system/multi-user.target.wants/origin-master-api.service.
Removed symlink /etc/systemd/system/origin-node.service.wants/origin-master-api.service.
+ systemctl stop origin-master-controllers
+ systemctl disable origin-master-controllers
Removed symlink /etc/systemd/system/multi-user.target.wants/origin-master-controllers.service.
2018/06/05 20:11:06 Waiting for host: 192.168.66.101:22
2018/06/05 20:11:09 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:11:17 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:11:25 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:11:31 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: connection refused. Sleeping 5s
2018/06/05 20:11:36 Connected to tcp://192.168.66.101:22
+ set +e
+ /usr/bin/oc get nodes
The connection to the server node01:8443 was refused - did you specify the right host or port?
+ '[' 1 -ne 0 ']'
+ sleep 5
+ /usr/bin/oc get nodes
NAME      STATUS    ROLES     AGE       VERSION
node01    Ready     master    15d       v1.9.1+a0ce1bc657
+ '[' 0 -ne 0 ']'
+ set -e
+ inventory_file=/root/inventory
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=1.27 ms
--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.272/1.272/1.272/0.000 ms
+ '[' 0 -ne 0 ']'
+ nodes_found=true
Found node02. Adding it to the inventory.
+ set -e
+ echo '192.168.66.102 node02'
+ echo 'Found node02. Adding it to the inventory.'
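For readers following the trace, the node-discovery logic shown above and continuing immediately below can be reconstructed roughly as the shell sketch that follows. It is inferred only from the `+`-prefixed xtrace lines; the actual provisioning code under cluster/up.sh may differ, and the redirection targets are assumptions because xtrace does not show redirections.

  #!/bin/bash
  # Sketch reconstructed from the xtrace: probe node02..node100 by IP,
  # add every reachable node to the [new_nodes] group of the Ansible
  # inventory, and stop at the first unreachable address.
  inventory_file=/root/inventory
  echo '[new_nodes]' >> "$inventory_file"        # destination assumed; xtrace hides redirections
  sed -i '/\[OSEv3:children\]/a new_nodes' "$inventory_file"
  nodes_found=false
  for i in $(seq 2 100); do
    node=$(printf node%02d "$i")
    node_ip=$(printf 192.168.66.1%02d "$i")
    set +e
    ping "$node_ip" -c 1
    if [ $? -ne 0 ]; then
      break                                      # first unreachable IP ends the scan
    fi
    nodes_found=true
    set -e
    echo "Found $node. Adding it to the inventory."
    echo "$node_ip $node" >> /etc/hosts          # assumed destination for the host entry
    echo "$node openshift_node_labels=\"{'region': 'infra','zone': 'default'}\" openshift_schedulable=true openshift_ip=$node_ip" >> "$inventory_file"
  done
  if [ "$nodes_found" = true ]; then
    ansible-playbook -i "$inventory_file" \
      /usr/share/ansible/openshift-ansible/playbooks/openshift-node/scaleup.yml
  fi

Note that the loop assumes contiguously assigned IPs: the first unreachable address (node03 in the trace below) terminates discovery, after which the scaleup playbook is run against whatever new nodes were found.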
+ echo 'node02 openshift_node_labels="{'\''region'\'': '\''infra'\'','\''zone'\'': '\''default'\''}" openshift_schedulable=true openshift_ip=192.168.66.102'
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable
--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms
+ '[' 1 -ne 0 ']'
+ break
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /usr/share/ansible/openshift-ansible/playbooks/openshift-node/scaleup.yml
PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate groups - Fail if no etcd hosts group is defined] ****************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Add master to oo_nodes_to_config] ****************************************
skipping: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)
PLAY [Ensure there are new_nodes] **********************************************
TASK [fail] ********************************************************************
skipping: [localhost]
TASK [fail]
******************************************************************** skipping: [localhost] PLAY [Initialization Checkpoint Start] ***************************************** TASK [Set install initialization 'In Progress'] ******************************** ok: [node01] PLAY [Populate config host groups] ********************************************* TASK [Load group name mapping variables] *************************************** ok: [localhost] TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] ********* skipping: [localhost] TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_lb_hosts required] *********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts required] ********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts is single host] **************************** skipping: [localhost] TASK [Evaluate groups - g_glusterfs_hosts required] **************************** skipping: [localhost] TASK [Evaluate groups - Fail if no etcd hosts group is defined] **************** skipping: [localhost] TASK [Evaluate oo_all_hosts] *************************************************** ok: [localhost] => (item=node01) ok: [localhost] => (item=node02) TASK [Evaluate oo_masters] ***************************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_master] ************************************************ ok: [localhost] TASK [Evaluate oo_new_etcd_to_config] ****************************************** TASK [Evaluate oo_masters_to_config] ******************************************* ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_to_config] ********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_etcd] ************************************************** ok: [localhost] TASK [Evaluate oo_etcd_hosts_to_upgrade] *************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_hosts_to_backup] **************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_nodes_to_config] ********************************************* ok: [localhost] => (item=node02) TASK [Add master to oo_nodes_to_config] **************************************** skipping: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) [WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config PLAY [Ensure that all non-node hosts are accessible] *************************** TASK [Gathering Facts] ********************************************************* ok: [node01] PLAY [Initialize basic host facts] ********************************************* TASK [Gathering Facts] ********************************************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02 TASK 
[openshift_sanitize_inventory : Check for usage of deprecated variables] *** ok: [node02] ok: [node01] TASK [openshift_sanitize_inventory : debug] ************************************ skipping: [node02] skipping: [node01] TASK [openshift_sanitize_inventory : set_stats] ******************************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] *** included: /usr/share/ansible/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node02, node01 included: /usr/share/ansible/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node02, node01 TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node02] ok: [node01] TASK [openshift_sanitize_inventory : set_fact] ********************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node02] ok: [node01] TASK [openshift_sanitize_inventory : Standardize on latest variable names] ***** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Normalize openshift_release] ************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02 TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* skipping: [node02] => (item=None) skipping: [node01] => (item=None) TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] *** skipping: [node01] skipping: [node02] TASK [Detecting Operating System from ostree_booted] *************************** ok: [node02] ok: [node01] TASK [set openshift_deployment_type if unset] ********************************** skipping: [node01] skipping: [node02] TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] *** ok: [node01] ok: [node02] TASK [Determine Atomic Host Docker Version] ************************************ skipping: [node01] skipping: [node02] TASK [assert atomic 
host docker version is 1.12 or later] ********************** skipping: [node01] skipping: [node02] PLAY [Initialize special first-master variables] ******************************* TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Disable web console if required] ***************************************** TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Install packages necessary for installer] ******************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Ensure openshift-ansible installer package deps are installed] *********** ok: [node02] => (item=iproute) ok: [node02] => (item=dbus-python) ok: [node02] => (item=PyYAML) ok: [node02] => (item=python-ipaddress) ok: [node02] => (item=yum-utils) TASK [Ensure various deps for running system containers are installed] ********* skipping: [node02] => (item=atomic) skipping: [node02] => (item=ostree) skipping: [node02] => (item=runc) PLAY [Initialize cluster facts] ************************************************ TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [Gather Cluster facts] **************************************************** changed: [node02] ok: [node01] TASK [Set fact of no_proxy_internal_hostnames] ********************************* skipping: [node01] skipping: [node02] TASK [Initialize openshift.node.sdn_mtu] *************************************** ok: [node01] ok: [node02] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* skipping: [node01] TASK [include_role] ************************************************************ skipping: [node01] TASK [debug] ******************************************************************* skipping: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** skipping: no hosts matched PLAY [Ensure the requested version packages are available.] ******************** skipping: no hosts matched PLAY [Verify Requirements] ***************************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [Run variable sanity checks] ********************************************** ok: [node01] PLAY [Initialization Checkpoint End] ******************************************* TASK [Set install initialization 'Complete'] *********************************** ok: [node01] PLAY [Validate node hostnames] ************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Query DNS for IP address of node02] ************************************** ok: [node02] TASK [Validate openshift_hostname when defined] ******************************** skipping: [node02] TASK [Validate openshift_ip exists on node when defined] *********************** skipping: [node02] PLAY [Setup yum repositories for all hosts] ************************************ TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Install Red Hat Subscription manager] ******************* skipping: [node02] TASK [rhel_subscribe : Is host already registered?] 
**************************** skipping: [node02] TASK [rhel_subscribe : Register host] ****************************************** skipping: [node02] TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] *********** skipping: [node02] TASK [rhel_subscribe : Attach to OpenShift Pool] ******************************* skipping: [node02] TASK [rhel_subscribe : include_tasks] ****************************************** skipping: [node02] TASK [openshift_repos : openshift_repos detect ostree] ************************* ok: [node02] TASK [openshift_repos : Ensure libselinux-python is installed] ***************** ok: [node02] TASK [openshift_repos : Remove openshift_additional.repo file] ***************** ok: [node02] TASK [openshift_repos : Create any additional repos that are defined] ********** TASK [openshift_repos : include_tasks] ***************************************** skipping: [node02] TASK [openshift_repos : include_tasks] ***************************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02 TASK [openshift_repos : Configure origin gpg keys] ***************************** ok: [node02] TASK [openshift_repos : Configure correct origin release repository] *********** ok: [node02] => (item=/usr/share/ansible/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2) TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] *** changed: [node02] => { "msg": "First run of openshift_repos" } TASK [openshift_repos : Record that openshift_repos already ran] *************** ok: [node02] RUNNING HANDLER [openshift_repos : refresh cache] ****************************** changed: [node02] PLAY [Configure os_firewall] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [os_firewall : Detecting Atomic Host Operating System] ******************** ok: [node02] TASK [os_firewall : Set fact r_os_firewall_is_atomic] ************************** ok: [node02] TASK [os_firewall : include_tasks] ********************************************* skipping: [node02] TASK [os_firewall : include_tasks] ********************************************* included: /usr/share/ansible/openshift-ansible/roles/os_firewall/tasks/iptables.yml for node02 TASK [os_firewall : Ensure firewalld service is not enabled] ******************* ok: [node02] TASK [os_firewall : Wait 10 seconds after disabling firewalld] ***************** skipping: [node02] TASK [os_firewall : Install iptables packages] ********************************* ok: [node02] => (item=iptables) ok: [node02] => (item=iptables-services) TASK [os_firewall : Start and enable iptables service] ************************* ok: [node02 -> node02] => (item=node02) TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] *** skipping: [node02] PLAY [create oo_hosts_containerized_managed_true host group] ******************* TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [group_by] **************************************************************** ok: [node01] PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK 
[container_runtime : Setup the docker-storage for overlay] **************** skipping: [node02] PLAY [create oo_hosts_containerized_managed_true host group] ******************* TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [group_by] **************************************************************** ok: [node01] PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_excluder : Install excluders] ********************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/install.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** ok: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* ok: [node02] TASK [openshift_excluder : Enable excluders] *********************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/exclude.yml for node02 TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** skipping: [node02] TASK [container_runtime : include_tasks] *************************************** included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/pre.yml for node02 TASK [container_runtime : include_tasks] *************************************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : include_tasks] *************************************** skipping: [node02] TASK [container_runtime : Get current installed Docker version] **************** ok: [node02] TASK [container_runtime : include_tasks] *************************************** included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/docker_sanity.yml for node02 TASK [container_runtime : Error out if Docker pre-installed but too old] ******* skipping: [node02] TASK [container_runtime : Error out if requested Docker is too old] ************ skipping: [node02] TASK [container_runtime : Fail if Docker version requested but downgrade is required] *** skipping: [node02] TASK [container_runtime : Error out if attempting to upgrade Docker across the 1.10 boundary] *** skipping: [node02] TASK [container_runtime : Install Docker] ************************************** skipping: [node02] TASK [container_runtime : Ensure docker.service.d directory exists] ************ ok: [node02] TASK [container_runtime : Configure Docker service unit file] ****************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Set registry params] ********************************* skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []}) 
skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []}) TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] *** skipping: [node02] TASK [container_runtime : Set Proxy Settings] ********************************** skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''}) TASK [container_runtime : Set various Docker options] ************************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Configure Docker Network OPTIONS] ******************** ok: [node02] TASK [container_runtime : Detect if docker is already started] ***************** ok: [node02] TASK [container_runtime : Start the Docker service] **************************** ok: [node02] TASK [container_runtime : set_fact] ******************************************** ok: [node02] TASK [container_runtime : include_tasks] *************************************** included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/post.yml for node02 TASK [container_runtime : Ensure /var/lib/containers exists] ******************* ok: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** ok: [node02] TASK [container_runtime : include_tasks] *************************************** included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/registry_auth.yml for node02 TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** ok: [node02] TASK [container_runtime : include_tasks] *************************************** skipping: [node02] TASK [container_runtime : Fail quickly if openshift_docker_options are set] **** skipping: [node02] TASK [container_runtime : include_tasks] *************************************** skipping: [node02] TASK [container_runtime : include_tasks] *************************************** skipping: [node02] TASK [container_runtime : Install Docker so we can use the client] ************* skipping: [node02] TASK [container_runtime : Disable Docker] ************************************** skipping: [node02] TASK [container_runtime : Ensure proxies are in the atomic.conf] *************** skipping: [node02] TASK [container_runtime : debug] *********************************************** skipping: [node02] TASK [container_runtime : include_tasks] *************************************** skipping: [node02] TASK [container_runtime : Pre-pull Container Engine System Container image] **** skipping: [node02] TASK [container_runtime : Ensure container-engine.service.d directory exists] *** skipping: [node02] TASK [container_runtime : Ensure /etc/docker directory exists] ***************** skipping: [node02] TASK [container_runtime : Install Container Engine System Container] *********** skipping: [node02] TASK 
[container_runtime : Configure Container Engine Service File] ************* skipping: [node02] TASK [container_runtime : Configure Container Engine] ************************** skipping: [node02] TASK [container_runtime : Start the Container Engine service] ****************** skipping: [node02] TASK [container_runtime : set_fact] ******************************************** skipping: [node02] TASK [container_runtime : include_tasks] *************************************** skipping: [node02] TASK [container_runtime : Check we are not using node as a Docker container with CRI-O] *** skipping: [node02] TASK [container_runtime : include_tasks] *************************************** included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/pre.yml for node02 TASK [container_runtime : include_tasks] *************************************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : include_tasks] *************************************** included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/syscontainer_packages.yml for node02 TASK [container_runtime : Ensure container-selinux is installed] *************** ok: [node02] TASK [container_runtime : Ensure atomic is installed] ************************** ok: [node02] TASK [container_runtime : Ensure runc is installed] **************************** ok: [node02] TASK [container_runtime : Check that overlay is in the kernel] ***************** changed: [node02] TASK [container_runtime : Add overlay to modprobe.d] *************************** skipping: [node02] TASK [container_runtime : Manually modprobe overlay into the kernel] *********** skipping: [node02] TASK [container_runtime : Enable and start systemd-modules-load] *************** skipping: [node02] TASK [container_runtime : Ensure proxies are in the atomic.conf] *************** included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/atomic_proxy.yml for node02 TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ****************** skipping: [node02] TASK [container_runtime : Add https_proxy to /etc/atomic.conf] ***************** skipping: [node02] TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ******************** skipping: [node02] TASK [container_runtime : debug] *********************************************** ok: [node02] => { "l_crio_image": "docker.io/kubevirtci/crio:1.9.10" } TASK [container_runtime : Pre-pull CRI-O System Container image] *************** ok: [node02] TASK [container_runtime : Install CRI-O System Container] ********************** ok: [node02] TASK [container_runtime : Remove CRI-O default configuration files] ************ ok: [node02] => (item=/etc/cni/net.d/200-loopback.conf) ok: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf) TASK [container_runtime : Create the CRI-O configuration] ********************** ok: [node02] TASK [container_runtime : Ensure CNI configuration directory exists] *********** ok: [node02] TASK [container_runtime : Add iptables allow rules] **************************** ok: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove iptables rules] ******************************* TASK [container_runtime : Add firewalld allow rules] *************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove firewalld allow rules] 
************************ TASK [container_runtime : Configure the CNI network] *************************** ok: [node02] TASK [container_runtime : Create /etc/sysconfig/crio-storage] ****************** ok: [node02] TASK [container_runtime : Create /etc/sysconfig/crio-network] ****************** ok: [node02] TASK [container_runtime : Start the CRI-O service] ***************************** ok: [node02] TASK [container_runtime : include_tasks] *************************************** included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/post.yml for node02 TASK [container_runtime : Ensure /var/lib/containers exists] ******************* ok: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** ok: [node02] TASK [container_runtime : include_tasks] *************************************** included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/registry_auth.yml for node02 TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** ok: [node02] TASK [container_runtime : include_tasks] *************************************** skipping: [node02] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role] ************************************************************ TASK [openshift_version : Use openshift.common.version fact as version to configure if already installed] *** ok: [node01] TASK [openshift_version : include_tasks] *************************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_version/tasks/first_master_rpm_version.yml for node01 TASK [openshift_version : Set rpm version to configure if openshift_pkg_version specified] *** skipping: [node01] TASK [openshift_version : Set openshift_version for rpm installation] ********** included: /usr/share/ansible/openshift-ansible/roles/openshift_version/tasks/check_available_rpms.yml for node01 TASK [openshift_version : Get available origin version] ************************ ok: [node01] TASK [openshift_version : fail] ************************************************ skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] TASK [openshift_version : set_fact] ******************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] TASK [debug] ******************************************************************* ok: [node01] => { "msg": "openshift_pkg_version set to -3.9.0" 
} PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] PLAY [Ensure the requested version packages are available.] ******************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [include_role] ************************************************************ TASK [openshift_version : Check openshift_version for rpm installation] ******** included: /usr/share/ansible/openshift-ansible/roles/openshift_version/tasks/check_available_rpms.yml for node02 TASK [openshift_version : Get available origin version] ************************ ok: [node02] TASK [openshift_version : fail] ************************************************ skipping: [node02] TASK [openshift_version : Fail if rpm version and docker image version are different] *** skipping: [node02] TASK [openshift_version : For an RPM install, abort when the release requested does not match the available version.] *** skipping: [node02] TASK [openshift_version : debug] *********************************************** ok: [node02] => { "openshift_release": "VARIABLE IS NOT DEFINED!" } TASK [openshift_version : debug] *********************************************** ok: [node02] => { "openshift_image_tag": "v3.9.0" } TASK [openshift_version : debug] *********************************************** ok: [node02] => { "openshift_pkg_version": "-3.9.0" } PLAY [Node Install Checkpoint Start] ******************************************* TASK [Set Node install 'In Progress'] ****************************************** ok: [node01] PLAY [Create OpenShift certificates for node hosts] **************************** TASK [openshift_node_certificates : Ensure CA certificate exists on openshift_ca_host] *** ok: [node02 -> node01] TASK [openshift_node_certificates : fail] ************************************** skipping: [node02] TASK [openshift_node_certificates : Check status of node certificates] ********* ok: [node02] => (item=system:node:node02.crt) ok: [node02] => (item=system:node:node02.key) ok: [node02] => (item=system:node:node02.kubeconfig) ok: [node02] => (item=ca.crt) ok: [node02] => (item=server.key) ok: [node02] => (item=server.crt) TASK [openshift_node_certificates : set_fact] ********************************** ok: [node02] TASK [openshift_node_certificates : Create openshift_generated_configs_dir if it does not exist] *** ok: [node02 -> node01] TASK [openshift_node_certificates : find] ************************************** ok: [node02 -> node01] TASK [openshift_node_certificates : Generate the node client config] *********** changed: [node02 -> node01] => (item=node02) TASK [openshift_node_certificates : Generate the node server certificate] ****** changed: [node02 -> node01] => (item=node02) TASK [openshift_node_certificates : Create a tarball of the node config directories] *** changed: [node02 -> node01] TASK [openshift_node_certificates : Retrieve the node config tarballs from the master] *** changed: [node02 -> node01] TASK [openshift_node_certificates : Ensure certificate directory exists] ******* ok: [node02] TASK [openshift_node_certificates : Unarchive the tarball on the node] ********* changed: [node02] TASK [openshift_node_certificates : Delete local temp directory] *************** ok: [node02 -> localhost] TASK [openshift_node_certificates : Copy OpenShift CA to system CA 
trust] ****** ok: [node02] => (item={u'cert': u'/etc/origin/node/ca.crt', u'id': u'openshift'}) PLAY [Disable excluders] ******************************************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02 TASK [openshift_excluder : Include verify_upgrade.yml when upgrading] ********** skipping: [node02] TASK [openshift_excluder : Disable excluders before the upgrade to remove older excluding expressions] *** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/unexclude.yml for node02 TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] TASK [openshift_excluder : Include install.yml] ******************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/install.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Include exclude.yml] ******************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/exclude.yml for node02 TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] TASK [openshift_excluder : Include unexclude.yml] ****************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/unexclude.yml for node02 TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** skipping: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] PLAY [Evaluate node groups] 
**************************************************** TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [Evaluate oo_containerized_master_nodes] ********************************** skipping: [localhost] => (item=node02) [WARNING]: Could not match supplied host pattern, ignoring: oo_containerized_master_nodes PLAY [Configure containerized nodes] ******************************************* skipping: no hosts matched PLAY [Configure nodes] ********************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_clock : Determine if chrony is installed] ********************** changed: [node02] [WARNING]: Consider using yum, dnf or zypper module rather than running rpm TASK [openshift_clock : Install ntp package] *********************************** skipping: [node02] TASK [openshift_clock : Start and enable ntpd/chronyd] ************************* changed: [node02] TASK [openshift_cloud_provider : Set cloud provider facts] ********************* skipping: [node02] TASK [openshift_cloud_provider : Create cloudprovider config dir] ************** skipping: [node02] TASK [openshift_cloud_provider : include the defined cloud provider files] ***** skipping: [node02] TASK [openshift_node : fail] *************************************************** skipping: [node02] TASK [openshift_node : include_tasks] ****************************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/dnsmasq_install.yml for node02 TASK [openshift_node : Check for NetworkManager service] *********************** ok: [node02] TASK [openshift_node : Set fact using_network_manager] ************************* ok: [node02] TASK [openshift_node : Install dnsmasq] **************************************** ok: [node02] TASK [openshift_node : ensure origin/node directory exists] ******************** ok: [node02] => (item=/etc/origin) changed: [node02] => (item=/etc/origin/node) TASK [openshift_node : Install node-dnsmasq.conf] ****************************** ok: [node02] TASK [openshift_node : include_tasks] ****************************************** skipping: [node02] TASK [openshift_node : include_tasks] ****************************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/dnsmasq.yml for node02 TASK [openshift_node : Install dnsmasq configuration] ************************** ok: [node02] TASK [openshift_node : Deploy additional dnsmasq.conf] ************************* skipping: [node02] TASK [openshift_node : Enable dnsmasq] ***************************************** ok: [node02] TASK [openshift_node : include_tasks] ****************************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/dnsmasq/network-manager.yml for node02 TASK [openshift_node : Install network manager dispatch script] **************** ok: [node02] TASK [openshift_node : Add iptables allow rules] ******************************* ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': 
False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove iptables rules] ********************************** TASK [openshift_node : Add firewalld allow rules] ****************************** skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove firewalld allow rules] *************************** TASK [openshift_node : Update journald config] ********************************* included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/journald.yml for node02 TASK [openshift_node : Checking for journald.conf] ***************************** ok: [node02] TASK [openshift_node : Create journald persistence directories] **************** ok: [node02] TASK [openshift_node : Update journald setup] ********************************** ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'}) ok: [node02] => (item={u'var': u'Compress', u'val': True}) ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000}) ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'}) ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'}) ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'}) ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'}) ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'}) ok: [node02] => (item={u'var': u'ForwardToSyslog', u'val': False}) ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False}) TASK [openshift_node : Restart journald] *************************************** skipping: [node02] TASK [openshift_node : Disable swap] ******************************************* ok: [node02] TASK [openshift_node : include node installer] ********************************* included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/install.yml for node02 TASK [openshift_node : Install Node package, sdn-ovs, conntrack packages] ****** ok: [node02] => (item={u'name': u'origin-node-3.9.0'}) ok: [node02] => (item={u'name': u'origin-sdn-ovs-3.9.0', u'install': True}) ok: [node02] => (item={u'name': u'conntrack-tools'}) TASK [openshift_node : Pre-pull node image when containerized] ***************** skipping: [node02] TASK [openshift_node : Restart cri-o] ****************************************** changed: [node02] TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] *** skipping: [node02] TASK [openshift_node : sysctl] ************************************************* ok: [node02] TASK [openshift_node : include_tasks] ****************************************** included: 
/usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/registry_auth.yml for node02 TASK [openshift_node : Check for credentials file for registry auth] *********** skipping: [node02] TASK [openshift_node : Create credentials for registry auth] ******************* skipping: [node02] TASK [openshift_node : Create credentials for registry auth (alternative)] ***** skipping: [node02] TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] *** skipping: [node02] TASK [openshift_node : include standard node config] *************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/config.yml for node02 TASK [openshift_node : Install the systemd units] ****************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/systemd_units.yml for node02 TASK [openshift_node : Install Node service file] ****************************** ok: [node02] TASK [openshift_node : include node deps docker service file] ****************** skipping: [node02] TASK [openshift_node : include ovs service environment file] ******************* skipping: [node02] TASK [openshift_node : include_tasks] ****************************************** skipping: [node02] TASK [openshift_node : include_tasks] ****************************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/config/configure-node-settings.yml for node02 TASK [openshift_node : Configure Node settings] ******************************** ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS=--loglevel=2 '}) ok: [node02] => (item={u'regex': u'^CONFIG_FILE=', u'line': u'CONFIG_FILE=/etc/origin/node/node-config.yaml'}) ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.9.0'}) TASK [openshift_node : include_tasks] ****************************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/config/configure-proxy-settings.yml for node02 TASK [openshift_node : Configure Proxy Settings] ******************************* skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='}) skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='}) skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'}) TASK [openshift_node : Pull container images] ********************************** skipping: [node02] TASK [openshift_node : Start and enable openvswitch service] ******************* skipping: [node02] TASK [openshift_node : set_fact] *********************************************** ok: [node02] TASK [openshift_node : file] *************************************************** skipping: [node02] TASK [openshift_node : Create the Node config] ********************************* changed: [node02] TASK [openshift_node : Configure Node Environment Variables] ******************* TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) TASK [openshift_node : Wait for master API to become available before proceeding] *** skipping: [node02] TASK [openshift_node : Start and enable node dep] ****************************** skipping: [node02] TASK [openshift_node : Start and enable node] ********************************** ok: [node02] TASK [openshift_node : Dump logs from node service if it failed] *************** skipping: [node02] TASK [openshift_node : Abort if node failed to start] 
************************** skipping: [node02] TASK [openshift_node : set_fact] *********************************************** ok: [node02] TASK [openshift_node : NFS storage plugin configuration] *********************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/storage_plugins/nfs.yml for node02 TASK [openshift_node : Install NFS storage plugin dependencies] **************** ok: [node02] TASK [openshift_node : Check for existence of nfs sebooleans] ****************** ok: [node02] => (item=virt_use_nfs) ok: [node02] => (item=virt_sandbox_use_nfs) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:19:36.011191', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.013710', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:19:35.997481', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:19:37.844955', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.009049', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:19:37.835906', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:19:36.011191', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.013710', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:19:35.997481', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:19:37.844955', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.009049', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 
'failed_when_result': False, u'start': u'2018-06-05 20:19:37.835906', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : GlusterFS storage plugin configuration] ***************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/storage_plugins/glusterfs.yml for node02 TASK [openshift_node : Install GlusterFS storage plugin dependencies] ********** ok: [node02] TASK [openshift_node : Check for existence of fusefs sebooleans] *************** ok: [node02] => (item=virt_use_fusefs) ok: [node02] => (item=virt_sandbox_use_fusefs) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:19:44.815436', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.006687', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:19:44.808749', '_ansible_ignore_errors': None, 'failed': False}) ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:19:46.095612', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.006407', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:19:46.089205', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:19:44.815436', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.006687', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:19:44.808749', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:19:46.095612', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.006407', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, 
u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:19:46.089205', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Ceph storage plugin configuration] ********************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/storage_plugins/ceph.yml for node02 TASK [openshift_node : Install Ceph storage plugin dependencies] *************** ok: [node02] TASK [openshift_node : iSCSI storage plugin configuration] ********************* included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/storage_plugins/iscsi.yml for node02 TASK [openshift_node : Install iSCSI storage plugin dependencies] ************** ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=device-mapper-multipath) TASK [openshift_node : restart services] *************************************** ok: [node02] => (item=multipathd) ok: [node02] => (item=rpcbind) TASK [openshift_node : Template multipath configuration] *********************** changed: [node02] TASK [openshift_node : Enable multipath] *************************************** changed: [node02] TASK [openshift_node : include_tasks] ****************************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/config/workaround-bz1331590-ovs-oom-fix.yml for node02 TASK [openshift_node : Create OpenvSwitch service.d directory] ***************** ok: [node02] TASK [openshift_node : Install OpenvSwitch service OOM fix] ******************** ok: [node02] TASK [tuned : Check for tuned package] ***************************************** ok: [node02] TASK [tuned : Set tuned OpenShift variables] *********************************** ok: [node02] TASK [tuned : Ensure directory structure exists] ******************************* ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'state': 'directory', 'ctime': 1526915770.737197, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1526915770.737197, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'state': 'directory', 'ctime': 1526915770.736197, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1526915770.736197, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'state': 'directory', 'ctime': 1526915770.737197, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1526915770.737197, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) skipping: [node02] => (item={'src': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'serole': 'object_r', 'ctime': 1526915770.7381968, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1523891799.0, 'owner': 'root', 'path': u'recommend.conf', 'size': 268, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) skipping: [node02] => (item={'src': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 
'selevel': 's0', 'seuser': 'system_u', 'serole': 'object_r', 'ctime': 1526915770.737197, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1523891799.0, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 593, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) skipping: [node02] => (item={'src': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'serole': 'object_r', 'ctime': 1526915770.736197, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1523891799.0, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) skipping: [node02] => (item={'src': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': u's0', 'seuser': u'system_u', 'serole': u'object_r', 'ctime': 1526915770.737197, 'state': u'file', 'gid': 0, 'mode': u'0644', 'mtime': 1523891799.0, 'owner': u'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': u'usr_t'}) TASK [tuned : Ensure files are populated from templates] *********************** skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'state': 'directory', 'ctime': 1526915770.737197, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1526915770.737197, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'state': 'directory', 'ctime': 1526915770.736197, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1526915770.736197, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'state': 'directory', 'ctime': 1526915770.737197, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1526915770.737197, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) ok: [node02] => (item={'src': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'serole': 'object_r', 'ctime': 1526915770.7381968, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1523891799.0, 'owner': 'root', 'path': u'recommend.conf', 'size': 268, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) ok: [node02] => (item={'src': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'serole': 'object_r', 'ctime': 1526915770.737197, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1523891799.0, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 593, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) ok: [node02] => (item={'src': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 
'serole': 'object_r', 'ctime': 1526915770.736197, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1523891799.0, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) ok: [node02] => (item={'src': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'system_u', 'serole': 'object_r', 'ctime': 1526915770.737197, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1523891799.0, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/usr/share/ansible/openshift-ansible/roles/tuned/templates', 'setype': 'usr_t'}) TASK [tuned : Make tuned use the recommended tuned profile on restart] ********* changed: [node02] => (item=/etc/tuned/active_profile) changed: [node02] => (item=/etc/tuned/profile_mode) TASK [tuned : Restart tuned service] ******************************************* changed: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] ******* ok: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] *** RUNNING HANDLER [openshift_node : restart node] ******************************** changed: [node02] PLAY [create additional node network plugin groups] **************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [group_by] **************************************************************** ok: [node02] TASK [group_by] **************************************************************** ok: [node02] TASK [group_by] **************************************************************** ok: [node02] TASK [group_by] **************************************************************** ok: [node02] TASK [group_by] **************************************************************** ok: [node02] [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_flannel [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_calico [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_contiv [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_kuryr PLAY [etcd_client node config] ************************************************* skipping: no hosts matched PLAY [Additional node config] ************************************************** skipping: no hosts matched PLAY [Additional node config] ************************************************** skipping: no hosts matched [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_nuage PLAY [Additional node config] ************************************************** skipping: no hosts matched PLAY [Configure Contiv masters] ************************************************ TASK [Gathering Facts] ********************************************************* ok: [node01] PLAY [Configure rest of Contiv nodes] ****************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] ok: [node02] PLAY [Configure Kuryr node] **************************************************** skipping: no hosts matched PLAY [Additional node config] ************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_manage_node : Wait for master API to become available before proceeding] *** skipping: [node02] TASK 
[openshift_manage_node : Wait for Node Registration] ********************** ok: [node02 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02 TASK [openshift_manage_node : Set node schedulability] ************************* ok: [node02 -> node01] TASK [openshift_manage_node : Label nodes] ************************************* changed: [node02 -> node01] TASK [Create group for deployment type] **************************************** ok: [node02] PLAY [Re-enable excluder if it was previously enabled] ************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02 TASK [openshift_excluder : Install excluders] ********************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/install.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Enable excluders] *********************************** included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/exclude.yml for node02 TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] PLAY [Node Install Checkpoint End] ********************************************* TASK [Set Node install 'Complete'] ********************************************* ok: [node01] PLAY RECAP ********************************************************************* localhost : ok=25 changed=0 unreachable=0 failed=0 node01 : ok=42 changed=0 unreachable=0 failed=0 node02 : ok=208 changed=28 unreachable=0 failed=0 INSTALLER STATUS *************************************************************** Initialization : Complete (0:01:09) Node Install : Complete (0:04:42) + set +e + crio=false + grep crio /root/inventory openshift_use_crio=true openshift_crio_systemcontainer_image_override=docker.io/kubevirtci/crio:1.9.10 + '[' 0 -eq 0 ']' + crio=true + set -e + cat + ansible-playbook -i /root/inventory post_deployment_configuration 
--extra-vars=crio=true

PLAY [new_nodes] ***************************************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]

TASK [Restart openvswitch service] *********************************************
changed: [node02]

PLAY [nodes, new_nodes] ********************************************************

TASK [Gathering Facts] *********************************************************
ok: [node01]
ok: [node02]

TASK [replace] *****************************************************************
changed: [node01]
changed: [node02]

TASK [replace] *****************************************************************
changed: [node02]
changed: [node01]

TASK [service] *****************************************************************
changed: [node01]
changed: [node02]

PLAY RECAP *********************************************************************
node01                     : ok=4    changed=3    unreachable=0    failed=0
node02                     : ok=6    changed=4    unreachable=0    failed=0

Sending file modes: C0755 217813128 oc
Sending file modes: C0600 5645 admin.kubeconfig
Cluster "node01:8443" set.
Cluster "node01:8443" set.
++ kubectl get nodes --no-headers
++ cluster/kubectl.sh get nodes --no-headers
++ grep -v Ready
+ '[' -n '' ']'
+ echo 'Nodes are ready:'
Nodes are ready:
+ kubectl get nodes
+ cluster/kubectl.sh get nodes
NAME      STATUS    ROLES     AGE       VERSION
node01    Ready     master    15d       v1.9.1+a0ce1bc657
node02    Ready               3m        v1.9.1+a0ce1bc657
+ make cluster-sync
./cluster/build.sh
Building ...
sha256:8b33043feeb10b27572d8053bdc7179a9496c83ded2be2221ae64b435ce6e0af
go version go1.10 linux/amd64
go version go1.10 linux/amd64
make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt'
hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh
sha256:8b33043feeb10b27572d8053bdc7179a9496c83ded2be2221ae64b435ce6e0af
go version go1.10 linux/amd64
go version go1.10 linux/amd64
Compiling tests...
compiled tests.test hack/build-docker.sh build Sending build context to Docker daemon 36.15 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b0110ac54e8d Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> ccd49c19cf6c Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> 75dc311c8fb2 Step 5/8 : USER 1001 ---> Using cache ---> 698bd8b43680 Step 6/8 : COPY virt-controller /virt-controller ---> 1ce72091508a Removing intermediate container cbbcc410d1de Step 7/8 : ENTRYPOINT /virt-controller ---> Running in 0c1d86cc2cec ---> b8f1c9c1c771 Removing intermediate container 0c1d86cc2cec Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "virt-controller" '' ---> Running in 686cec8dae75 ---> 865d01c802e4 Removing intermediate container 686cec8dae75 Successfully built 865d01c802e4 Sending build context to Docker daemon 38.08 MB Step 1/14 : FROM kubevirt/libvirt:3.7.0 ---> 60c80c8f7523 Step 2/14 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 0a530d8cad4e Step 3/14 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> d9efcdeb20bf Step 4/14 : COPY sock-connector /sock-connector ---> Using cache ---> 929dfaf3db96 Step 5/14 : COPY sh.sh /sh.sh ---> Using cache ---> d5002fc25dc4 Step 6/14 : COPY virt-launcher /virt-launcher ---> 59b53214079b Removing intermediate container 37a453f88f12 Step 7/14 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> b99884139041 Removing intermediate container 6c4b92bd10da Step 8/14 : RUN chmod 0640 /etc/sudoers.d/kubevirt ---> Running in 8766260d0390  ---> b8d2c18b92cd Removing intermediate container 8766260d0390 Step 9/14 : RUN rm -f /libvirtd.sh ---> Running in b3d984211fcb  ---> edf4774720d2 Removing intermediate container b3d984211fcb Step 10/14 : COPY libvirtd.sh /libvirtd.sh ---> 30c7e6e9c8df Removing intermediate container 64db17250156 Step 11/14 : RUN chmod a+x /libvirtd.sh ---> Running in 8ce83ef574f2  ---> 426eb6a7175c Removing intermediate container 8ce83ef574f2 Step 12/14 : COPY entrypoint.sh /entrypoint.sh ---> eb43efd3bc6b Removing intermediate container d61fa63b2186 Step 13/14 : ENTRYPOINT /entrypoint.sh ---> Running in 5dd2ef5493e8 ---> 2cc035430cb1 Removing intermediate container 5dd2ef5493e8 Step 14/14 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "virt-launcher" '' ---> Running in 5e9a16e53cda ---> c8c1f6e03ae1 Removing intermediate container 5e9a16e53cda Successfully built c8c1f6e03ae1 Sending build context to Docker daemon 36.71 MB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b0110ac54e8d Step 3/5 : COPY virt-handler /virt-handler ---> 8396a4efb885 Removing intermediate container 01c33bc8d626 Step 4/5 : ENTRYPOINT /virt-handler ---> Running in f330c63f5995 ---> 23c21add4211 Removing intermediate container f330c63f5995 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "virt-handler" '' ---> Running in 51c399858b2b ---> 5cdfbcf3c815 Removing intermediate container 51c399858b2b Successfully built 5cdfbcf3c815 Sending build context to Docker daemon 36.87 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b0110ac54e8d Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api ---> Using 
cache ---> 4bf1b014ced0 Step 4/8 : WORKDIR /home/virt-api ---> Using cache ---> 2291af9afcbb Step 5/8 : USER 1001 ---> Using cache ---> 94008214adaa Step 6/8 : COPY virt-api /virt-api ---> a7c59f799c4d Removing intermediate container 0a87330c928e Step 7/8 : ENTRYPOINT /virt-api ---> Running in c0c2f1dd0dab ---> 27bd5bba3cd9 Removing intermediate container c0c2f1dd0dab Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "virt-api" '' ---> Running in a41f2400b0ab ---> 3c72b7baf9ae Removing intermediate container a41f2400b0ab Successfully built 3c72b7baf9ae Sending build context to Docker daemon 6.656 kB Step 1/10 : FROM fedora:27 ---> 9110ae7f579f Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b0110ac54e8d Step 3/10 : ENV container docker ---> Using cache ---> 09a0eb53efc4 Step 4/10 : RUN dnf -y install scsi-target-utils bzip2 e2fsprogs ---> Using cache ---> ec7fa13faf0e Step 5/10 : RUN mkdir -p /images ---> Using cache ---> eda3b84d1450 Step 6/10 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/1-alpine.img ---> Using cache ---> c87921a2a927 Step 7/10 : ADD run-tgt.sh / ---> Using cache ---> 3fe7ebf8a604 Step 8/10 : EXPOSE 3260 ---> Using cache ---> 53d30acf30c6 Step 9/10 : CMD /run-tgt.sh ---> Using cache ---> cf16e6eb9867 Step 10/10 : LABEL "iscsi-demo-target-tgtd" '' "kubevirt-functional-tests-openshift-3.9-crio-release1" '' ---> Running in 0f914f9be7d5 ---> 0e1a9ea136da Removing intermediate container 0f914f9be7d5 Successfully built 0e1a9ea136da Sending build context to Docker daemon 2.56 kB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b0110ac54e8d Step 3/5 : ENV container docker ---> Using cache ---> 09a0eb53efc4 Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> b56a90c7fd64 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "vm-killer" '' ---> Running in 674bffc55d50 ---> 5eddfb72c666 Removing intermediate container 674bffc55d50 Successfully built 5eddfb72c666 Sending build context to Docker daemon 5.12 kB Step 1/7 : FROM debian:sid ---> bcec0ae8107e Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> 313c78fd9693 Step 3/7 : ENV container docker ---> Using cache ---> a0801069f3af Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> 003548c7ad90 Step 5/7 : ADD entry-point.sh / ---> Using cache ---> cfb88c6b6cb0 Step 6/7 : CMD /entry-point.sh ---> Using cache ---> 791630a9414e Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "registry-disk-v1alpha" '' ---> Running in 4d3fb751b6db ---> 0a86ebc161e7 Removing intermediate container 4d3fb751b6db Successfully built 0a86ebc161e7 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:36162/kubevirt/registry-disk-v1alpha:devel ---> 0a86ebc161e7 Step 2/4 : MAINTAINER "David Vossel" \ ---> Running in 3e8d9f53fbc4 ---> 93c3a1493f76 Removing intermediate container 3e8d9f53fbc4 Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Running in 34705bee0ede  % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 6 12.1M 6 848k 0 0 708k 0 0:00:17 0:00:01 0:00:16 708k 71 12.1M 71 
100 12.1M  100 12.1M    0     0  4776k      0  0:00:02  0:00:02 --:--:-- 4774k
 ---> c509f784c2b6
Removing intermediate container 34705bee0ede
Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.9-crio-release1" ''
 ---> Running in 2aa6922b4fe9
 ---> 70233b1cb8a7
Removing intermediate container 2aa6922b4fe9
Successfully built 70233b1cb8a7
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:36162/kubevirt/registry-disk-v1alpha:devel
 ---> 0a86ebc161e7
Step 2/4 : MAINTAINER "The KubeVirt Project"
 ---> Running in a44003bc68ea
 ---> 3f963574ded8
Removing intermediate container a44003bc68ea
Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2
 ---> Running in dd5b6db76be8
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  221M  100  221M    0     0   970k      0  0:03:53  0:03:53 --:--:-- 1538k
 ---> b64aef97d199
Removing intermediate container dd5b6db76be8
Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.9-crio-release1" ''
 ---> Running in 867d5175886e
 ---> e76d2b8e0058
Removing intermediate container 867d5175886e
Successfully built e76d2b8e0058
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:36162/kubevirt/registry-disk-v1alpha:devel
 ---> 0a86ebc161e7
Step 2/4 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> 3f963574ded8
Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso
 ---> Running in dfca7c230c66
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 37.0M  100 37.0M    0     0  6205k      0  0:00:06  0:00:06 --:--:-- 8245k
 ---> 7c8d3742d181
Removing intermediate container dfca7c230c66
Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.9-crio-release1" ''
 ---> Running in 3c92cf8f702a
 ---> f99e92f8877d
Removing intermediate container 3c92cf8f702a
Successfully built f99e92f8877d
Sending build context to Docker daemon 33.97 MB
Step 1/8 : FROM fedora:27
 ---> 9110ae7f579f
Step 2/8 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> b0110ac54e8d
Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl
 ---> Using cache
 ---> 1394197f657f
Step 4/8 : WORKDIR /home/virtctl
 ---> Using cache
 ---> 6104617ebba9
Step 5/8 : USER 1001
 ---> Using cache
 ---> fa0a18131006
Step 6/8 : COPY subresource-access-test /subresource-access-test
 ---> f9d7b7c70fb5
Removing intermediate container 87e9c4513e28
Step 7/8 : ENTRYPOINT /subresource-access-test
 ---> Running in 088158e369ab
 ---> 522d319389b4
Removing intermediate container 088158e369ab
Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "subresource-access-test" ''
 ---> Running in 0a1222136774
 ---> 1681710f99e4
Removing intermediate container 0a1222136774
Successfully built 1681710f99e4
Sending build context to Docker daemon 3.072 kB
Step 1/9 : FROM fedora:27
 ---> 9110ae7f579f
Step 2/9 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> b0110ac54e8d
Step 3/9 : ENV container docker
 ---> Using cache
 ---> 09a0eb53efc4
Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all
 ---> Using cache
 ---> d5c23db469cf
Step 5/9 : ENV GIMME_GO_VERSION 1.9.2
 ---> Using cache
 ---> 8e66c78dedc0
Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh
 ---> Using cache
 ---> c95a7af438cb
Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin"
 ---> Using cache
 ---> d1bbb7a9fd1c
Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli
 ---> Using cache
 ---> 66ae1105f468
Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "winrmcli" ''
 ---> Running in 2593ba2ccc62
 ---> 51e123de4943
Removing intermediate container
2593ba2ccc62 Successfully built 51e123de4943 hack/build-docker.sh push The push refers to a repository [localhost:36162/kubevirt/virt-controller] 88ec8d2e15ab: Preparing e8218823dc22: Preparing 39bae602f753: Preparing e8218823dc22: Pushed 88ec8d2e15ab: Pushed 39bae602f753: Pushed devel: digest: sha256:39fd0b4b7bca49d1a37644d9aa75595c5f7c3fb89599a275eb704d7b30607904 size: 948 The push refers to a repository [localhost:36162/kubevirt/virt-launcher] 2dfd63beecce: Preparing 1f94c1515c9b: Preparing 1f94c1515c9b: Preparing fb69a122d946: Preparing 1375dcd79bef: Preparing 8611f65955b9: Preparing dbbeda1c2bf3: Preparing 54be34ae881b: Preparing 0a2c53ad21cd: Preparing d232139a2650: Preparing 530cc55618cd: Preparing 34fa414dfdf6: Preparing a1359dc556dd: Preparing 0a2c53ad21cd: Waiting dbbeda1c2bf3: Waiting 490c7c373332: Preparing d232139a2650: Waiting 4b440db36f72: Preparing 530cc55618cd: Waiting 39bae602f753: Preparing 34fa414dfdf6: Waiting 54be34ae881b: Waiting 39bae602f753: Waiting 490c7c373332: Waiting 4b440db36f72: Waiting 1f94c1515c9b: Pushed 2dfd63beecce: Pushed fb69a122d946: Pushed 1375dcd79bef: Pushed 8611f65955b9: Pushed 54be34ae881b: Pushed 0a2c53ad21cd: Pushed 530cc55618cd: Pushed 34fa414dfdf6: Pushed a1359dc556dd: Pushed 490c7c373332: Pushed 39bae602f753: Mounted from kubevirt/virt-controller d232139a2650: Pushed dbbeda1c2bf3: Pushed 4b440db36f72: Pushed devel: digest: sha256:ef1585f80650daf8ad8978b4eda485640ae8e1ec9fa4eb2170d21310504092a6 size: 3651 The push refers to a repository [localhost:36162/kubevirt/virt-handler] 9cd3175c8243: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-launcher 9cd3175c8243: Pushed devel: digest: sha256:2005a234ea77062bb4b48db9bf4dc107b0d0bc7758b67ad362d897cb0116a2f6 size: 740 The push refers to a repository [localhost:36162/kubevirt/virt-api] a344a21481b9: Preparing fb95d38c6fbd: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-handler fb95d38c6fbd: Pushed a344a21481b9: Pushed devel: digest: sha256:2a49513daf0d5a6806b39ffc7c533e7e1afa424570e00b00f9274e0782265d0c size: 948 The push refers to a repository [localhost:36162/kubevirt/iscsi-demo-target-tgtd] 7d5ffdb95845: Preparing 67dca7d6b2ae: Preparing 8f131c2efff0: Preparing 8ae9c002a00c: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-api 7d5ffdb95845: Pushed 8f131c2efff0: Pushed 67dca7d6b2ae: Pushed 8ae9c002a00c: Pushed devel: digest: sha256:4cafa85217fafd972c238e32dc02814e963a02e855d4c09d6ce3b7fbb5081b50 size: 1368 The push refers to a repository [localhost:36162/kubevirt/vm-killer] 54dfdb30c356: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/iscsi-demo-target-tgtd 54dfdb30c356: Pushed devel: digest: sha256:151cd30d249ddbd1068b68261975c9ccd841c618a1bfc30aa03affa7504a812d size: 740 The push refers to a repository [localhost:36162/kubevirt/registry-disk-v1alpha] c6915653c205: Preparing fa58fb7b9535: Preparing 6709b2da72b8: Preparing c6915653c205: Pushed fa58fb7b9535: Pushed 6709b2da72b8: Pushed devel: digest: sha256:1e4c317c0921439039d7a35889e310fa3ad41ba4f6a808fcf5b9f8edc009042d size: 948 The push refers to a repository [localhost:36162/kubevirt/cirros-registry-disk-demo] 9b3a0ca9b630: Preparing c6915653c205: Preparing fa58fb7b9535: Preparing 6709b2da72b8: Preparing fa58fb7b9535: Mounted from kubevirt/registry-disk-v1alpha 6709b2da72b8: Mounted from kubevirt/registry-disk-v1alpha c6915653c205: Mounted from kubevirt/registry-disk-v1alpha 9b3a0ca9b630: Pushed devel: digest: 
sha256:2e02b0a1abf8c23fac244916c0c6bce953ae03508078c0d43bfd29eaed4c8ce6 size: 1160 The push refers to a repository [localhost:36162/kubevirt/fedora-cloud-registry-disk-demo] d2b53bade09c: Preparing c6915653c205: Preparing fa58fb7b9535: Preparing 6709b2da72b8: Preparing 6709b2da72b8: Mounted from kubevirt/cirros-registry-disk-demo fa58fb7b9535: Mounted from kubevirt/cirros-registry-disk-demo c6915653c205: Mounted from kubevirt/cirros-registry-disk-demo d2b53bade09c: Pushed devel: digest: sha256:15ff1e92c902272aa3787c6f0548775a2b82c1ead91d12de7b7714bc00836595 size: 1161 The push refers to a repository [localhost:36162/kubevirt/alpine-registry-disk-demo] 2d3e42536e45: Preparing c6915653c205: Preparing fa58fb7b9535: Preparing 6709b2da72b8: Preparing c6915653c205: Mounted from kubevirt/fedora-cloud-registry-disk-demo 6709b2da72b8: Mounted from kubevirt/fedora-cloud-registry-disk-demo fa58fb7b9535: Mounted from kubevirt/fedora-cloud-registry-disk-demo 2d3e42536e45: Pushed devel: digest: sha256:70a235f36e511bffe539b6c91c6ef444987492dca0086ba354ef804cfd9547d6 size: 1160 The push refers to a repository [localhost:36162/kubevirt/subresource-access-test] 4fe26b51d29f: Preparing 7a13304598ad: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/vm-killer 7a13304598ad: Pushed 4fe26b51d29f: Pushed devel: digest: sha256:96a7587096552e82516bdc6eea6935013730e9dc80c85d50f000e5ff6bae26a6 size: 948 The push refers to a repository [localhost:36162/kubevirt/winrmcli] e373097a073f: Preparing c4e78913515e: Preparing 25a5cce12702: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/subresource-access-test e373097a073f: Pushed 25a5cce12702: Pushed c4e78913515e: Pushed devel: digest: sha256:031bfa55ea7fd8f88796b4a9e9de93c67de1a5ef39fee332e4c109bf1ab2a7ec size: 1165 make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt' Done ./cluster/clean.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.9.0-crio ++ KUBEVIRT_PROVIDER=os-3.9.0-crio ++ KUBEVIRT_NUM_NODES=2 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.9-crio-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.9-crio-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.9-crio-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d 
/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.5.1-alpha.2-47-g81420d3 ++ KUBEVIRT_VERSION=v0.5.1-alpha.2-47-g81420d3 + source cluster/os-3.9.0-crio/provider.sh ++ set -e ++ source cluster/os-3.9.0/provider.sh +++ set -e +++ image=os-3.9.0@sha256:234b3ae5c335c9fa32fa3bc01d5833f8f4d45420d82a8f8b12adc02687eb88b1 +++ source cluster/ephemeral-provider-common.sh ++++ set -e ++++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' ++ image=os-3.9.0-crio@sha256:107d03dad4da6957e28774b121a45e177f31d7b4ad43c6eab7b24d467e59e213 + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.9.0-crio ++ KUBEVIRT_PROVIDER=os-3.9.0-crio ++ source hack/config-default.sh source hack/config-os-3.9.0-crio.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/iscsi-demo-target-tgtd images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ kubeconfig=cluster/vagrant/.kubeconfig +++ namespace=kube-system ++ test -f hack/config-provider-os-3.9.0-crio.sh ++ source hack/config-provider-os-3.9.0-crio.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/cluster/os-3.9.0-crio/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/cluster/os-3.9.0-crio/.kubectl +++ docker_prefix=localhost:36162/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Cleaning up ...' Cleaning up ... 
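Note: the cleanup trace that follows is just the label-driven teardown performed by ./cluster/clean.sh. A minimal sketch of that pattern is shown here for readability; the namespaces and resource types are taken from the trace below, while the loop itself is illustrative rather than the script's exact code:

    export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
    namespaces=(default kube-system)
    resources=(apiservices deployment rs services validatingwebhookconfiguration secrets \
               pv pvc ds customresourcedefinitions pods clusterrolebinding rolebinding \
               roles clusterroles serviceaccounts)
    for ns in "${namespaces[@]}"; do
      for res in "${resources[@]}"; do
        # delete anything labelled by the KubeVirt manifests; ignore "No resources found"
        cluster/kubectl.sh -n "$ns" delete "$res" -l kubevirt.io || true
      done
    done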
+ cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers + grep foregroundDeleteVirtualMachine + read p the server doesn't have a resource type "vmis" + _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0 No resources found + namespaces=(default ${namespace}) + for i in '${namespaces[@]}' + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete deployment -l kubevirt.io No resources found + _kubectl -n default delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete rs -l kubevirt.io No resources found + _kubectl -n default delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete services -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n default delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete secrets -l kubevirt.io No resources found + _kubectl -n default delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete pv -l kubevirt.io No resources found + _kubectl -n default delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete pvc -l kubevirt.io No resources found + _kubectl -n default delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete ds -l kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n default delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + 
KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete pods -l kubevirt.io No resources found + _kubectl -n default delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n default delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete rolebinding -l kubevirt.io No resources found + _kubectl -n default delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete roles -l kubevirt.io No resources found + _kubectl -n default delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete clusterroles -l kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig ++ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig ++ cluster/os-3.9.0-crio/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + for i in '${namespaces[@]}' + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete services -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n kube-system delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + 
KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete secrets -l kubevirt.io No resources found + _kubectl -n kube-system delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete pv -l kubevirt.io No resources found + _kubectl -n kube-system delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete pvc -l kubevirt.io No resources found + _kubectl -n kube-system delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete ds -l kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n kube-system delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete pods -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete rolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete roles -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete clusterroles -l kubevirt.io No resources found + _kubectl -n kube-system delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io ++ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig ++ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig ++ wc -l ++ cluster/os-3.9.0-crio/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + sleep 2 + echo Done Done ./cluster/deploy.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out ++ 
VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.9.0-crio ++ KUBEVIRT_PROVIDER=os-3.9.0-crio ++ KUBEVIRT_NUM_NODES=2 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.9-crio-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.9-crio-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.9-crio-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.5.1-alpha.2-47-g81420d3 ++ KUBEVIRT_VERSION=v0.5.1-alpha.2-47-g81420d3 + source cluster/os-3.9.0-crio/provider.sh ++ set -e ++ source cluster/os-3.9.0/provider.sh +++ set -e +++ image=os-3.9.0@sha256:234b3ae5c335c9fa32fa3bc01d5833f8f4d45420d82a8f8b12adc02687eb88b1 +++ source cluster/ephemeral-provider-common.sh ++++ set -e ++++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' ++ image=os-3.9.0-crio@sha256:107d03dad4da6957e28774b121a45e177f31d7b4ad43c6eab7b24d467e59e213 + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.9.0-crio ++ KUBEVIRT_PROVIDER=os-3.9.0-crio ++ source hack/config-default.sh source hack/config-os-3.9.0-crio.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/iscsi-demo-target-tgtd images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ kubeconfig=cluster/vagrant/.kubeconfig +++ namespace=kube-system ++ test -f hack/config-provider-os-3.9.0-crio.sh ++ source hack/config-provider-os-3.9.0-crio.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/cluster/os-3.9.0-crio/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/cluster/os-3.9.0-crio/.kubectl +++ docker_prefix=localhost:36162/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates 
master_ip network_provider kubeconfig namespace + echo 'Deploying ...' Deploying ... + [[ -z openshift-3.9-crio-release ]] + [[ openshift-3.9-crio-release =~ .*-dev ]] + [[ openshift-3.9-crio-release =~ .*-release ]] + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]] + continue + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]] + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml clusterrole "kubevirt.io:admin" created clusterrole "kubevirt.io:edit" created clusterrole "kubevirt.io:view" created serviceaccount "kubevirt-apiserver" created clusterrolebinding "kubevirt-apiserver" created clusterrolebinding "kubevirt-apiserver-auth-delegator" created rolebinding "kubevirt-apiserver" created role "kubevirt-apiserver" created clusterrole "kubevirt-apiserver" created clusterrole "kubevirt-controller" created serviceaccount "kubevirt-controller" created serviceaccount "kubevirt-privileged" created clusterrolebinding "kubevirt-controller" created clusterrolebinding "kubevirt-controller-cluster-admin" created clusterrolebinding "kubevirt-privileged-cluster-admin" created clusterrole "kubevirt.io:default" created clusterrolebinding "kubevirt.io:default" created service "virt-api" created deployment "virt-api" created deployment "virt-controller" created daemonset "virt-handler" created customresourcedefinition "virtualmachineinstances.kubevirt.io" created customresourcedefinition "virtualmachineinstancereplicasets.kubevirt.io" created customresourcedefinition "virtualmachineinstancepresets.kubevirt.io" created customresourcedefinition "offlinevirtualmachines.kubevirt.io" created + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R persistentvolumeclaim "disk-alpine" created persistentvolume "iscsi-disk-alpine" created persistentvolumeclaim "disk-custom" created persistentvolume "iscsi-disk-custom" created daemonset "iscsi-demo-target-tgtd" created serviceaccount "kubevirt-testing" created clusterrolebinding "kubevirt-testing-cluster-admin" created + '[' os-3.9.0-crio = vagrant-openshift ']' + [[ os-3.9.0-crio =~ os-3.9.0.* ]] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"] + _kubectl adm 
policy add-scc-to-user privileged -z kubevirt-testing -n kube-system + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"] + _kubectl adm policy add-scc-to-user privileged admin + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged admin scc "privileged" added to: ["admin"] + echo Done Done ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n 'iscsi-demo-target-tgtd-flzxk 0/1 ContainerCreating 0 4s iscsi-demo-target-tgtd-sgq6q 0/1 ContainerCreating 0 4s virt-api-fd96f94b5-9dssk 0/1 ContainerCreating 0 9s virt-api-fd96f94b5-jthmw 0/1 ContainerCreating 0 9s virt-controller-5f7c946cc4-8clll 0/1 ContainerCreating 0 9s virt-controller-5f7c946cc4-b84jp 0/1 ContainerCreating 0 9s virt-handler-5whzj 0/1 ContainerCreating 0 3s virt-handler-6kmt4 0/1 ContainerCreating 0 3s' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... + kubectl get pods -n kube-system --no-headers + cluster/kubectl.sh get pods -n kube-system --no-headers + grep -v Running iscsi-demo-target-tgtd-flzxk 0/1 ContainerCreating 0 12s iscsi-demo-target-tgtd-sgq6q 0/1 ContainerCreating 0 12s virt-api-fd96f94b5-9dssk 0/1 ContainerCreating 0 17s virt-api-fd96f94b5-jthmw 0/1 ContainerCreating 0 17s virt-controller-5f7c946cc4-8clll 0/1 ContainerCreating 0 17s virt-controller-5f7c946cc4-b84jp 0/1 ContainerCreating 0 17s virt-handler-5whzj 0/1 ContainerCreating 0 11s virt-handler-6kmt4 0/1 ContainerCreating 0 11s + sleep 10 ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n 'iscsi-demo-target-tgtd-flzxk 0/1 ContainerCreating 0 26s iscsi-demo-target-tgtd-sgq6q 0/1 ContainerCreating 0 26s virt-api-fd96f94b5-9dssk 0/1 ContainerCreating 0 31s virt-api-fd96f94b5-jthmw 0/1 ContainerCreating 0 31s virt-controller-5f7c946cc4-8clll 0/1 ContainerCreating 0 31s virt-controller-5f7c946cc4-b84jp 0/1 ContainerCreating 0 31s virt-handler-5whzj 0/1 ContainerCreating 0 25s virt-handler-6kmt4 0/1 ContainerCreating 0 25s' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... 
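For reference, the privileged SCC grants traced above can be collected into one standalone step. This is a sketch, not the verbatim deploy script: it reuses the KUBECONFIG and bundled .kubectl client from this run, and the _kubectl wrapper seen in the trace does nothing more than export KUBECONFIG before delegating to that client.

    # Sketch: the SCC grants performed above, gathered into a loop.
    export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig

    # Allow the KubeVirt service accounts in kube-system to use the privileged SCC,
    # as done above for kubevirt-controller, kubevirt-testing, kubevirt-privileged
    # and kubevirt-apiserver.
    for sa in kubevirt-controller kubevirt-testing kubevirt-privileged kubevirt-apiserver; do
        cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged -z "$sa" -n kube-system
    done

    # The deploy step above also grants the privileged SCC to the 'admin' user.
    cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged admin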
+ kubectl get pods -n kube-system --no-headers + cluster/kubectl.sh get pods -n kube-system --no-headers + grep -v Running iscsi-demo-target-tgtd-flzxk 0/1 ContainerCreating 0 46s iscsi-demo-target-tgtd-sgq6q 0/1 ContainerCreating 0 46s virt-api-fd96f94b5-9dssk 0/1 ContainerCreating 0 51s virt-api-fd96f94b5-jthmw 0/1 ContainerCreating 0 51s virt-controller-5f7c946cc4-8clll 0/1 ContainerCreating 0 51s virt-controller-5f7c946cc4-b84jp 0/1 ContainerCreating 0 51s virt-handler-5whzj 0/1 ContainerCreating 0 45s virt-handler-6kmt4 0/1 ContainerCreating 0 45s + sleep 10 ++ kubectl get pods -n kube-system --no-headers ++ grep -v Running ++ cluster/kubectl.sh get pods -n kube-system --no-headers + '[' -n 'iscsi-demo-target-tgtd-flzxk 0/1 ContainerCreating 0 1m iscsi-demo-target-tgtd-sgq6q 0/1 ContainerCreating 0 1m virt-api-fd96f94b5-jthmw 0/1 ContainerCreating 0 1m virt-controller-5f7c946cc4-b84jp 0/1 ContainerCreating 0 1m virt-handler-5whzj 0/1 ContainerCreating 0 1m virt-handler-6kmt4 0/1 ContainerCreating 0 1m' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... + kubectl get pods -n kube-system --no-headers + cluster/kubectl.sh get pods -n kube-system --no-headers + grep -v Running iscsi-demo-target-tgtd-flzxk 0/1 ContainerCreating 0 1m iscsi-demo-target-tgtd-sgq6q 0/1 ContainerCreating 0 1m virt-handler-5whzj 0/1 ContainerCreating 0 1m virt-handler-6kmt4 0/1 ContainerCreating 0 1m + sleep 10 ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n 'iscsi-demo-target-tgtd-flzxk 0/1 ContainerCreating 0 2m virt-handler-6kmt4 0/1 ContainerCreating 0 1m' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... + kubectl get pods -n kube-system --no-headers + cluster/kubectl.sh get pods -n kube-system --no-headers + grep -v Running iscsi-demo-target-tgtd-flzxk 0/1 ContainerCreating 0 2m virt-handler-6kmt4 0/1 ContainerCreating 0 2m + sleep 10 ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get pods) + '[' -n '' ']' ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers ++ awk '!/virt-controller/ && /false/' ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get pods) + '[' -n '' ']' ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers ++ awk '/virt-controller/ && /true/' ++ wc -l Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get pods) + '[' 0 -lt 1 ']' + echo 'Waiting for KubeVirt virt-controller container to become ready ...' Waiting for KubeVirt virt-controller container to become ready ... 
+ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers + cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers + awk '/virt-controller/ && /true/' + wc -l Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get pods) 0 + sleep 10 ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers ++ awk '/virt-controller/ && /true/' ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers ++ wc -l Unable to connect to the server: unexpected EOF + '[' 0 -lt 1 ']' + echo 'Waiting for KubeVirt virt-controller container to become ready ...' Waiting for KubeVirt virt-controller container to become ready ... + kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers + awk '/virt-controller/ && /true/' + wc -l + cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers Unable to connect to the server: read tcp 127.0.0.1:49576->127.0.0.1:36159: read: connection reset by peer 0 + sleep 10 ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers ++ awk '/virt-controller/ && /true/' ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers ++ wc -l Unable to connect to the server: net/http: TLS handshake timeout + '[' 0 -lt 1 ']' + echo 'Waiting for KubeVirt virt-controller container to become ready ...' Waiting for KubeVirt virt-controller container to become ready ... 
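The wait logic traced in the last few blocks is a plain poll-and-retry: first until no kube-system pod is listed outside the Running state, then until at least one virt-controller container reports ready=true. The transient API errors above (request timeout, connection reset, TLS handshake timeout) only make a single iteration come back empty, so the loop sleeps and tries again. A minimal sketch of that pattern, assuming the same cluster/kubectl.sh wrapper and KUBECONFIG as this run (not the verbatim script):

    export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig

    # Poll until every pod in kube-system shows STATUS=Running.
    while [ -n "$(cluster/kubectl.sh get pods -n kube-system --no-headers | grep -v Running)" ]; do
        echo 'Waiting for kubevirt pods to enter the Running state ...'
        sleep 10
    done

    # Poll until at least one virt-controller container reports ready=true.
    while [ "$(cluster/kubectl.sh get pods -n kube-system \
            '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' \
            --no-headers | awk '/virt-controller/ && /true/' | wc -l)" -lt 1 ]; do
        echo 'Waiting for KubeVirt virt-controller container to become ready ...'
        sleep 10
    done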
+ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ awk '/virt-controller/ && /true/'
+ wc -l
2
+ sleep 10
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ awk '/virt-controller/ && /true/'
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ wc -l
+ '[' 2 -lt 1 ']'
+ kubectl get pods -n kube-system
+ cluster/kubectl.sh get pods -n kube-system
NAME                               READY     STATUS              RESTARTS   AGE
iscsi-demo-target-tgtd-flzxk       0/1       Running             0          8m
iscsi-demo-target-tgtd-sgq6q       0/1       Running             1          8m
virt-api-fd96f94b5-9dssk           1/1       Running             0          8m
virt-api-fd96f94b5-jthmw           1/1       Running             0          8m
virt-controller-5f7c946cc4-8clll   1/1       Running             0          8m
virt-controller-5f7c946cc4-b84jp   1/1       Running             0          8m
virt-handler-5whzj                 1/1       Running             0          8m
virt-handler-6kmt4                 0/1       ContainerCreating   0          8m
+ kubectl version
+ cluster/kubectl.sh version
oc v3.9.0+ba7faec-1
kubernetes v1.9.1+a0ce1bc657
features: Basic-Auth GSSAPI Kerberos SPNEGO
Server https://127.0.0.1:36159
openshift v3.9.0+ba7faec-1
kubernetes v1.9.1+a0ce1bc657
+ ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/junit.xml'
+ [[ -d /home/nfs/images/windows2016 ]]
+ [[ openshift-3.9-crio-release == windows ]]
+ FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/junit.xml'
+ make functest
hack/dockerized "hack/build-func-tests.sh"
sha256:8b33043feeb10b27572d8053bdc7179a9496c83ded2be2221ae64b435ce6e0af
go version go1.10 linux/amd64
go version go1.10 linux/amd64
Compiling tests...
compiled tests.test hack/functests.sh Running Suite: Tests Suite ========================== Random Seed: 1528232181 Will run 109 of 109 specs • [SLOW TEST:11.809 seconds] Subresource Api /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37 Rbac Authorization /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:48 with correct permissions /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:51 should be allowed to access subresource endpoint /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:52 ------------------------------ • [SLOW TEST:6.036 seconds] Subresource Api /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37 Rbac Authorization /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:48 Without permissions /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:56 should not be able to access subresource endpoint /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:57 ------------------------------ • [SLOW TEST:6.876 seconds] Subresource Api /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37 Rbac Authorization For Version Command /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:63 with authenticated user /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:66 should be allowed to access subresource version endpoint /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:67 ------------------------------ • [SLOW TEST:6.263 seconds] Subresource Api /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37 Rbac Authorization For Version Command /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:63 Without permissions /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:71 should be able to access subresource version endpoint /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:72 ------------------------------ • Failure [181.661 seconds] VNC /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:37 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:48 with VNC connection /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:49 should allow accessing the VNC device [It] /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:50 Timed out after 90.182s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: Starting a VirtualMachineInstance • ------------------------------ • Failure [181.397 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 should start it [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:65 Timed out after 90.000s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure [181.724 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 should attach virt-launcher to it [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:71 Timed out after 90.000s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ •••• ------------------------------ • Failure [181.309 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 with boot order /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:159 should be able to boot from selected disk /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 Alpine as first boot [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.176s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: defining a VirtualMachineInstance with an Alpine disk STEP: adding a Cirros Disk STEP: setting boot order STEP: starting VirtualMachineInstance STEP: Waiting the VirtualMachineInstance start • Failure [181.476 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 with boot order /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:159 should be able to boot from selected disk /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 Cirros as first boot [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.184s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: defining a VirtualMachineInstance with an Alpine disk STEP: adding a Cirros Disk STEP: setting boot order STEP: starting VirtualMachineInstance STEP: Waiting the VirtualMachineInstance start • [SLOW TEST:61.581 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 with user-data /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:186 without k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:187 should retry starting the VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:188 ------------------------------ •! Panic [61.286 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 with user-data /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:186 without k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:187 should log warning and proceed once the secret is there [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:218 Test Panicked runtime error: invalid memory address or nil pointer dereference /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/panic.go:505 Full Stack Trace /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/panic.go:505 +0x229 kubevirt.io/kubevirt/tests_test.glob..func17.2.9.1.2() /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:239 +0x431 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).runSync(0xc4207980c0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) 
/root/go/src/kubevirt.io/kubevirt/tests/tests_suite_test.go:42 +0xaa testing.tRunner(0xc4205313b0, 0x1269a80) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0 created by testing.(*T).Run /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:824 +0x2e0 ------------------------------ STEP: Starting a VirtualMachineInstance STEP: Checking that VirtualMachineInstance start failed • Failure [181.963 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 when virt-launcher crashes /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:263 should be stopped and have Failed phase [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:264 Timed out after 90.184s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure [181.192 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 when virt-handler crashes /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:286 should recover and continue management [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:287 Timed out after 90.000s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • [SLOW TEST:56.086 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 when virt-handler is responsive /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:316 should indicate that a node is ready for vms /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:317 ------------------------------ • Failure in Spec Setup (BeforeEach) [182.248 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 when virt-handler is not responsive /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:347 the node controller should react [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:385 Timed out after 90.183s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ S [SKIPPING] [1.266 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 with non default namespace /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:438 should log libvirt start and stop lifecycle events of the domain /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 kubevirt-test-default [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Skip log query tests for JENKINS ci test environment /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:443 ------------------------------ S [SKIPPING] [1.468 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 with non default namespace /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:438 should log libvirt start and stop lifecycle events of the domain /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 kubevirt-test-alternative [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Skip log query tests for JENKINS ci test environment /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:443 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.938 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 VirtualMachineInstance Emulation Mode /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:499 should enable emulation in virt-launcher [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:519 Software emulation is not enabled on this cluster /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:515 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.896 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:59 VirtualMachineInstance Emulation Mode /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:499 should be reflected in domain XML [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:556 Software emulation is not enabled on this cluster /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:515 ------------------------------ • ------------------------------ • Failure [180.746 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Delete a VirtualMachineInstance's Pod /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:615 should result in the VirtualMachineInstance moving to a finalized state [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:616 Timed out after 90.000s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: Creating the VirtualMachineInstance • Failure [182.604 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Delete a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:647 with an active pod. 
/root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:648 should result in pod being terminated [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:649 Timed out after 90.101s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: Creating the VirtualMachineInstance • Failure [181.433 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Delete a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:647 with grace period greater than 0 /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:672 should run graceful shutdown [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:673 Timed out after 90.002s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: Setting a VirtualMachineInstance termination grace period to 5 STEP: Creating the VirtualMachineInstance • Failure [181.964 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Killed VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:724 should be in Failed phase [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:725 Timed out after 90.145s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: Starting a VirtualMachineInstance • Failure [181.465 seconds] Vmlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:45 Killed VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:724 should be left alone by virt-handler [It] /root/go/src/kubevirt.io/kubevirt/tests/vmlifecycle_test.go:752 Timed out after 90.186s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: Starting a VirtualMachineInstance • [SLOW TEST:18.624 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify only admin role has access only to kubevirt-config /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:42 ------------------------------ • [SLOW TEST:31.719 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vm /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:30.598 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given an ovm /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:30.266 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vm preset /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:30.061 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vm replica set /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • Failure [1.385 seconds] VMDefaults /root/go/src/kubevirt.io/kubevirt/tests/vmdefaults_test.go:33 Disk defaults /root/go/src/kubevirt.io/kubevirt/tests/vmdefaults_test.go:66 Should be applied to VMs [It] /root/go/src/kubevirt.io/kubevirt/tests/vmdefaults_test.go:68 Expected error: <*errors.StatusError | 0xc42003b0e0>: { ErrStatus: { TypeMeta: {Kind: "", APIVersion: ""}, ListMeta: {SelfLink: "", ResourceVersion: "", Continue: ""}, Status: "Failure", Message: "the server could not find the requested resource (post virtualmachines.kubevirt.io)", Reason: "NotFound", Details: { Name: "", Group: "kubevirt.io", Kind: "virtualmachines", UID: "", Causes: [ { Type: "UnexpectedServerResponse", Message: "404 page not found", Field: "", }, ], RetryAfterSeconds: 0, }, Code: 404, }, } the server could not find the requested resource (post virtualmachines.kubevirt.io) not to have occurred /root/go/src/kubevirt.io/kubevirt/tests/vmdefaults_test.go:71 ------------------------------ • Failure [360.661 seconds] RegistryDisk 
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting and stopping the same VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:90 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:91 should success multiple times [It] /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:92 Timed out after 180.000s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: Starting the VirtualMachineInstance • Failure [121.561 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:111 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:112 should not modify the spec on status update [It] /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:113 Timed out after 60.174s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: Starting the VirtualMachineInstance • Failure [61.965 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting multiple VMs /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:129 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:130 should success [It] /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:131 Timed out after 30.179s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance • Failure in Spec Setup (BeforeEach) [181.528 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance attached to the pod network [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:143 should be able to reach /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 the Inbound VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.000s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [181.526 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance attached to the pod network [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:143 should be able to reach /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 the internet /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.000s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [181.329 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance attached to the pod network [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:143 should be reachable via the propagated IP from a Pod /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 on the same node from Pod /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.000s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [182.071 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance attached to the pod network [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:143 should be reachable via the propagated IP from a Pod /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 on a different node from Pod /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.182s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [181.820 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance attached to the pod network [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:143 should be reachable via the propagated IP from a Pod /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 on the same node from Node /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.000s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [181.574 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance attached to the pod network [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:143 should be reachable via the propagated IP from a Pod /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 on a different node from Node /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.000s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [182.120 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance attached to the pod network [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:143 with a service matching the vm exposed /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:255 should be able to reach the vm based on labels specified on the vm /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:275 Timed out after 90.171s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [181.382 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance attached to the pod network [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:143 with a service matching the vm exposed /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:255 should fail to reach the vm if an invalid servicename is used /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:286 Timed out after 90.152s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [182.263 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance attached to the pod network [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:143 with a subdomain and a headless service given /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:302 should be able to reach the vm via its unique fully qualified domain name /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:325 Timed out after 90.184s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [181.911 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance with custom interface model [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:357 should expose the right device type to the guest /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:358 Timed out after 90.161s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ • Failure in Spec Setup (BeforeEach) [181.703 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:47 VirtualMachineInstance with default interface model [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:371 should expose the right device type to the guest /root/go/src/kubevirt.io/kubevirt/tests/vm_networking_test.go:372 Timed out after 90.182s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:992 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.012 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to start a vm [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:132 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1268 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.010 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to stop a running vm [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:138 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1268 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.010 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:149 should have correct UUID /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:191 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1268 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.010 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:149 should have pod IP /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:207 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1268 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.011 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:225 should succeed to start a vm /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:241 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1268 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.012 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:225 should succeed to stop a vm /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:249 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1268 ------------------------------ • Failure [91.861 seconds] LeaderElection /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:43 Start a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:53 when the controller pod is not running /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:54 should success [It] /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:55 Timed out after 90.000s. 
Expected : virt-controller-5f7c946cc4-h7hqz to equal : virt-controller-5f7c946cc4-b84jp /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:73 ------------------------------ STEP: Destroying the leading controller pod ••panic: test timed out after 1h30m0s goroutine 14451 [running]: testing.(*M).startAlarm.func1() /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1240 +0xfc created by time.goFunc /gimme/.gimme/versions/go1.10.linux.amd64/src/time/sleep.go:172 +0x44 goroutine 1 [chan receive, 90 minutes]: testing.(*T).Run(0xc4205313b0, 0x11e4785, 0x9, 0x1269a80, 0x47fc86) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:825 +0x301 testing.runTests.func1(0xc4205312c0) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1063 +0x64 testing.tRunner(0xc4205312c0, 0xc4207dfdf8) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0 testing.runTests(0xc4207d0a60, 0x1a72630, 0x1, 0x1, 0x412009) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1061 +0x2c4 testing.(*M).Run(0xc42002ca00, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:978 +0x171 main.main() _testmain.go:44 +0x151 goroutine 5 [chan receive]: kubevirt.io/kubevirt/vendor/github.com/golang/glog.(*loggingT).flushDaemon(0x1a990e0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:879 +0x8b created by kubevirt.io/kubevirt/vendor/github.com/golang/glog.init.0 /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:410 +0x203 goroutine 6 [syscall, 90 minutes]: os/signal.signal_recv(0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/sigqueue.go:139 +0xa6 os/signal.loop() /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:22 +0x22 created by os/signal.init.0 /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:28 +0x41 goroutine 71 [select]: kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).match(0xc420778b00, 0x12ec080, 0x1ab7728, 0x412801, 0x0, 0x0, 0x0, 0x1ab7728) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:139 +0x2e6 kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).Should(0xc420778b00, 0x12ec080, 0x1ab7728, 0x0, 0x0, 0x0, 0xc420778b00) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:48 +0x62 kubevirt.io/kubevirt/tests_test.glob..func4.3.4() /root/go/src/kubevirt.io/kubevirt/tests/ovm_test.go:201 +0x171 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).runSync(0xc4202ba960, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:109 +0x9c kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).run(0xc4202ba960, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:63 +0x13e kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*ItNode).Run(0xc4204aaba0, 0x12e02a0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) 
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go:25 +0x7f kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).runSample(0xc4206c7790, 0x0, 0x12e02a0, 0xc4200e6f60) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:176 +0x5a6 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).Run(0xc4206c7790, 0x12e02a0, 0xc4200e6f60) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:127 +0xe3 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpec(0xc4201e2dc0, 0xc4206c7790, 0x0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:198 +0x10d kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpecs(0xc4201e2dc0, 0x126a701) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:168 +0x32c kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run(0xc4201e2dc0, 0xb) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:64 +0xdc kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite.(*Suite).Run(0xc4200ea370, 0x7f886c034b98, 0xc4205313b0, 0x11e6ace, 0xb, 0xc4207d0aa0, 0x2, 0x2, 0x12f9b80, 0xc4200e6f60, ...) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite/suite.go:62 +0x27c kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithCustomReporters(0x12e1020, 0xc4205313b0, 0x11e6ace, 0xb, 0xc4207d0a80, 0x2, 0x2, 0x2) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:218 +0x258 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithDefaultAndCustomReporters(0x12e1020, 0xc4205313b0, 0x11e6ace, 0xb, 0xc42041d5d0, 0x1, 0x1, 0x1) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:206 +0xab kubevirt.io/kubevirt/tests_test.TestTests(0xc4205313b0) /root/go/src/kubevirt.io/kubevirt/tests/tests_suite_test.go:42 +0xaa testing.tRunner(0xc4205313b0, 0x1269a80) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0 created by testing.(*T).Run /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:824 +0x2e0 goroutine 72 [chan receive, 90 minutes]: kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).registerForInterrupts(0xc4201e2dc0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:220 +0xc0 created by kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:59 +0x60 goroutine 39 [select, 90 minutes, locked to thread]: runtime.gopark(0x126b790, 0x0, 0x11e1590, 0x6, 0x18, 0x1) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/proc.go:291 +0x11a runtime.selectgo(0xc420093f50, 0xc420606060) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/select.go:392 +0xe50 runtime.ensureSigM.func1() /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/signal_unix.go:549 +0x1f4 runtime.goexit() /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/asm_amd64.s:2361 +0x1 goroutine 77 [IO wait]: internal/poll.runtime_pollWait(0x7f886c020ea0, 0x72, 0xc420989850) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/netpoll.go:173 +0x57 internal/poll.(*pollDesc).wait(0xc42034ef18, 0x72, 
0xffffffffffffff00, 0x12e1fc0, 0x198a638) /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:85 +0x9b internal/poll.(*pollDesc).waitRead(0xc42034ef18, 0xc420732000, 0x8000, 0x8000) /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:90 +0x3d internal/poll.(*FD).Read(0xc42034ef00, 0xc420732000, 0x8000, 0x8000, 0x0, 0x0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_unix.go:157 +0x17d net.(*netFD).Read(0xc42034ef00, 0xc420732000, 0x8000, 0x8000, 0x0, 0x8, 0x7ffb) /gimme/.gimme/versions/go1.10.linux.amd64/src/net/fd_unix.go:202 +0x4f net.(*conn).Read(0xc4201147b0, 0xc420732000, 0x8000, 0x8000, 0x0, 0x0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/net/net.go:176 +0x6a crypto/tls.(*block).readFromUntil(0xc42076fa40, 0x7f886c0350b0, 0xc4201147b0, 0x5, 0xc4201147b0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:493 +0x96 crypto/tls.(*Conn).readRecord(0xc4202bee00, 0x126b817, 0xc4202bef20, 0x20) /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:595 +0xe0 crypto/tls.(*Conn).Read(0xc4202bee00, 0xc420623000, 0x1000, 0x1000, 0x0, 0x0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:1156 +0x100 bufio.(*Reader).Read(0xc4207717a0, 0xc42064e118, 0x9, 0x9, 0xc420818bf8, 0xc42036b080, 0xc420989d10) /gimme/.gimme/versions/go1.10.linux.amd64/src/bufio/bufio.go:216 +0x238 io.ReadAtLeast(0x12def20, 0xc4207717a0, 0xc42064e118, 0x9, 0x9, 0x9, 0xc420989ce0, 0xc420989ce0, 0x406614) /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:309 +0x86 io.ReadFull(0x12def20, 0xc4207717a0, 0xc42064e118, 0x9, 0x9, 0xc420818ba0, 0xc420989d10, 0xc400006a01) /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:327 +0x58 kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.readFrameHeader(0xc42064e118, 0x9, 0x9, 0x12def20, 0xc4207717a0, 0x0, 0xc400000000, 0x8a97ad, 0xc420989fb0) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:237 +0x7b kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Framer).ReadFrame(0xc42064e0e0, 0xc420bee240, 0x0, 0x0, 0x0) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:492 +0xa4 kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*clientConnReadLoop).run(0xc420989fb0, 0x126a948, 0xc420484fb0) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1428 +0x8e kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*ClientConn).readLoop(0xc4202c9380) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1354 +0x76 created by kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Transport).newClientConn /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:579 +0x651 make: *** [functest] Error 2 + make cluster-down ./cluster/down.sh
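Most of the functional-test failures above share one symptom: the assertion at tests/utils.go:992 gives up after 90s (or 180s) because the VirtualMachineInstance never becomes ready, and the run finally hits the go test 1h30m suite timeout and panics, so make functest exits with status 2 and the EXIT trap tears the cluster down. The cluster snapshot taken before the tests already hints at the cause on this os-3.9.0-crio provider: virt-handler-6kmt4 was still in ContainerCreating and both iscsi-demo-target-tgtd pods were 0/1 after 8 minutes, and the API server itself was intermittently timing out. A triage sketch for a follow-up run, assuming the same workspace layout; the generated pod-name suffixes (e.g. virt-handler-6kmt4) are from this run and will differ on a rerun:

    export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
    KUBECTL=cluster/os-3.9.0-crio/.kubectl

    # Why is virt-handler stuck in ContainerCreating and why are the iscsi demo
    # targets not ready? The Events section of 'describe' usually names the
    # image-pull / CNI / volume problem directly.
    $KUBECTL -n kube-system describe pod virt-handler-6kmt4
    $KUBECTL -n kube-system describe pod iscsi-demo-target-tgtd-flzxk

    # Recent kube-system events, oldest first.
    $KUBECTL -n kube-system get events --sort-by=.metadata.creationTimestamp

    # State of the VMIs the tests created, plus the virt-handler logs, which
    # should show why the VMIs never reported ready within the 90s window.
    $KUBECTL get vmis --all-namespaces
    $KUBECTL -n kube-system logs -l kubevirt.io=virt-handler --tail=100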