+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ [[ openshift-3.10-release =~ openshift-.* ]]
+ [[ openshift-3.10-release =~ .*-crio-.* ]]
+ export KUBEVIRT_PROVIDER=os-3.10.0
+ KUBEVIRT_PROVIDER=os-3.10.0
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT SIGINT SIGTERM SIGSTOP
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading ......
Downloading .......
2018/06/26 11:53:44 Waiting for host: 192.168.66.102:22
2018/06/26 11:53:47 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/26 11:53:55 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/26 11:54:03 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/26 11:54:11 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/26 11:54:19 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/26 11:54:27 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
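The trace above boils down to two idioms: a cleanup trap that tears the cluster down on any exit, and a poll-until-SSH-answers loop. A minimal sketch of the same pattern follows; `wait_for_ssh` is a hypothetical helper name, and it assumes an nc build that supports -z. Note that SIGSTOP cannot be caught, so listing it in the trap above is a no-op; EXIT/INT/TERM cover the useful cases.

    #!/bin/bash
    set -euo pipefail

    # Tear the cluster down no matter how the script exits.
    cleanup() { make cluster-down; }
    trap cleanup EXIT INT TERM

    # Poll a TCP port until it accepts connections, mirroring the
    # "Problem with dial ... Sleeping 5s" loop in the log above.
    wait_for_ssh() {
        local host=$1 port=${2:-22}
        until nc -z -w 3 "$host" "$port"; do
            echo "$(date '+%Y/%m/%d %H:%M:%S') Problem with dial: ${host}:${port}. Sleeping 5s"
            sleep 5
        done
        echo "Connected to tcp://${host}:${port}"
    }

    wait_for_ssh 192.168.66.102   # node02, as in the log above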
2018/06/26 11:54:32 Connected to tcp://192.168.66.102:22
+ systemctl stop origin-node.service
+ rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/
++ docker ps -q
+ containers=
+ '[' -n '' ']'
++ docker ps -q -a
+ containers='380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8'
+ '[' -n '380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8' ']'
+ docker rm -f 380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8
380578eb6b01
04838495ca18
aef4dea680cc
46bea3472251
6251077554f3
fe2a0d7ec163
970722bcd3fd
1eb051c1b629
1052914cea3e
e039c376e094
84eb0c833d7c
4e08778d9b1d
0a9a2fcf5f77
50aebcf65f7b
9faedfc6ec82
f59bf1b05c0b
2ec313cb7901
5a782498db8c
fef39cf40a53
c8ca0d879d8b
aabbef87da66
0ac6cd26ae28
d021d99166a4
ed496b2ca295
e69e08bd00a8
0ddad7779bb0
3ca535d6f49f
2fc9538787a8
2018/06/26 11:54:36 Waiting for host: 192.168.66.101:22
2018/06/26 11:54:39 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/26 11:54:47 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/26 11:54:55 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/26 11:55:03 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/26 11:55:08 Connected to tcp://192.168.66.101:22
+ inventory_file=/root/inventory
+ openshift_ansible=/root/openshift-ansible
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=0.636 ms

--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.636/0.636/0.636/0.000 ms
Found node02. Adding it to the inventory.
+ '[' 0 -ne 0 ']'
+ nodes_found=true
+ set -e
+ echo '192.168.66.102 node02'
+ echo 'Found node02. Adding it to the inventory.'
+ echo 'node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102'
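For reference, the discovery loop traced above can be reconstructed as the following sketch: for each candidate index 2..100 it derives nodeNN and 192.168.66.1NN, pings once, and appends the node to the inventory; the first unreachable address ends the scan. The redirection targets are not visible in the `set -x` output, so writing to /etc/hosts and to the inventory file are assumptions here.

    # Reconstruction of the traced node-discovery loop (same logic,
    # minus the set -x noise). inventory_file matches the path in the log.
    inventory_file=/root/inventory
    echo '[new_nodes]' >> "$inventory_file"
    sed -i '/\[OSEv3:children\]/a new_nodes' "$inventory_file"

    nodes_found=false
    for i in $(seq 2 100); do
        node=$(printf node%02d "$i")
        node_ip=$(printf 192.168.66.1%02d "$i")
        if ! ping -c 1 "$node_ip" > /dev/null 2>&1; then
            break    # first gap in the address range ends the scan
        fi
        echo "Found $node. Adding it to the inventory."
        echo "$node_ip $node" >> /etc/hosts    # assumed target
        echo "$node openshift_node_group_name=\"node-config-compute\" openshift_schedulable=true openshift_ip=$node_ip" >> "$inventory_file"
        nodes_found=true
    done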
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable

--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms

+ '[' 1 -ne 0 ']'
+ break
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml

PLAY [Populate config host groups] *********************************************

TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Evaluate oo_nodes_to_bootstrap] ******************************************
ok: [localhost] => (item=node02)
TASK [Add masters to oo_nodes_to_bootstrap] ************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)

PLAY [Ensure there are new_nodes] **********************************************

TASK [fail] ********************************************************************
skipping: [localhost]
TASK [fail] ********************************************************************
skipping: [localhost]

PLAY [Initialization Checkpoint Start] *****************************************

TASK [Set install initialization 'In Progress'] ********************************
ok: [node01]

PLAY [Populate config host groups] *********************************************

TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Evaluate oo_nodes_to_bootstrap] ******************************************
ok: [localhost] => (item=node02)
TASK [Add masters to oo_nodes_to_bootstrap] ************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)
 [WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config

PLAY [Ensure that all non-node hosts are accessible] ***************************

TASK [Gathering Facts] *********************************************************
ok: [node01]

PLAY [Initialize basic host facts] *********************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]
TASK [openshift_sanitize_inventory : include_tasks] ****************************
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02
TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] ***
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : debug] ************************************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : set_stats] ********************************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] ***
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02
TASK [openshift_sanitize_inventory : conditional_set_fact] *********************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : set_fact] *********************************
ok: [node02]
ok: [node01]
TASK [openshift_sanitize_inventory : conditional_set_fact] *********************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : Standardize on latest variable names] *****
ok: [node02]
ok: [node01]
TASK [openshift_sanitize_inventory : Normalize openshift_release] **************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : include_tasks] ****************************
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02
TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : set_fact] *********************************
skipping: [node01] => (item=openshift_hosted_etcd_storage_kind)
skipping: [node02] => (item=openshift_hosted_etcd_storage_kind)
TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : ensure provider configuration variables are defined] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure removed web console extension variables are not set] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that web console port matches API server port] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : At least one master is schedulable] *******
skipping: [node01]
skipping: [node02]
TASK [Detecting Operating System from ostree_booted] ***************************
ok: [node02]
ok: [node01]
TASK [set openshift_deployment_type if unset] **********************************
skipping: [node01]
skipping: [node02]
TASK [check for node already bootstrapped] *************************************
ok: [node02]
ok: [node01]
TASK [initialize_facts set fact openshift_is_bootstrapped] *********************
ok: [node01]
ok: [node02]
TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] ***
ok: [node01]
ok: [node02]
TASK [Determine Atomic Host Docker Version] ************************************
skipping: [node01]
skipping: [node02]
TASK [assert atomic host docker version is 1.12 or later] **********************
skipping: [node01]
skipping: [node02]

PLAY [Retrieve existing master configs and validate] ***************************

TASK [openshift_control_plane : stat] ******************************************
ok: [node01]
TASK [openshift_control_plane : slurp] *****************************************
ok: [node01]
TASK [openshift_control_plane : set_fact] **************************************
ok: [node01]
TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] ***
ok: [node01]
TASK [openshift_control_plane : set_fact] **************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
skipping: [node01]

PLAY [Initialize special first-master variables] *******************************

TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]

PLAY [Disable web console if required] *****************************************

TASK [set_fact] ****************************************************************
skipping: [node01]

PLAY [Setup yum repositories for all hosts] ************************************

TASK [rhel_subscribe : fail] ***************************************************
skipping: [node02]
TASK [rhel_subscribe : Install Red Hat Subscription manager] *******************
skipping: [node02]
TASK [rhel_subscribe : Is host already registered?] ****************************
skipping: [node02]
TASK [rhel_subscribe : Register host] ******************************************
skipping: [node02]
TASK [rhel_subscribe : fail] ***************************************************
skipping: [node02]
TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] ***********
skipping: [node02]
TASK [rhel_subscribe : Attach to OpenShift Pool] *******************************
skipping: [node02]
TASK [rhel_subscribe : Satellite preparation] **********************************
skipping: [node02]
TASK [openshift_repos : openshift_repos detect ostree] *************************
ok: [node02]
TASK [openshift_repos : Ensure libselinux-python is installed] *****************
ok: [node02]
TASK [openshift_repos : Remove openshift_additional.repo file] *****************
ok: [node02]
TASK [openshift_repos : Create any additional repos that are defined] **********
TASK [openshift_repos : include_tasks] *****************************************
skipping: [node02]
TASK [openshift_repos : include_tasks] *****************************************
included: /root/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02
TASK [openshift_repos : Configure origin gpg keys] *****************************
ok: [node02]
TASK [openshift_repos : Configure correct origin release repository] ***********
ok: [node02] => (item=/root/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2)
TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] ***
changed: [node02] => { "msg": "First run of openshift_repos" }
TASK [openshift_repos : Record that openshift_repos already ran] ***************
ok: [node02]
RUNNING HANDLER [openshift_repos : refresh cache] ******************************
changed: [node02]

PLAY [Install packages necessary for installer] ********************************

TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [Determine if chrony is installed] ****************************************
changed: [node02]
 [WARNING]: Consider using the yum, dnf or zypper module rather than running
rpm. If you need to use command because yum, dnf or zypper is insufficient you
can add warn=False to this command task or set command_warnings=False in
ansible.cfg to get rid of this message.
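The warning fires because the role shells out to rpm for package detection instead of using Ansible's yum/dnf modules. The equivalent module-free check in plain shell (a sketch; the package names match the tasks that follow):

    # Detect chrony the way the traced task does; only fall back to ntp
    # when chrony is absent, matching the "Install ntp package: skipping"
    # result below.
    if rpm -q chrony > /dev/null 2>&1; then
        echo "chrony already installed; ntp not needed"
    else
        yum install -y ntp
    fi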
TASK [Install ntp package] *****************************************************
skipping: [node02]
TASK [Start and enable ntpd/chronyd] *******************************************
changed: [node02]
TASK [Ensure openshift-ansible installer package deps are installed] ***********
ok: [node02] => (item=iproute)
ok: [node02] => (item=dbus-python)
ok: [node02] => (item=PyYAML)
ok: [node02] => (item=python-ipaddress)
ok: [node02] => (item=libsemanage-python)
ok: [node02] => (item=yum-utils)
ok: [node02] => (item=python-docker)

PLAY [Initialize cluster facts] ************************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]
TASK [get openshift_current_version] *******************************************
ok: [node02]
ok: [node01]
TASK [set_fact openshift_portal_net if present on masters] *********************
ok: [node01]
ok: [node02]
TASK [Gather Cluster facts] ****************************************************
changed: [node02]
changed: [node01]
TASK [Set fact of no_proxy_internal_hostnames] *********************************
skipping: [node01]
skipping: [node02]
TASK [Initialize openshift.node.sdn_mtu] ***************************************
changed: [node02]
ok: [node01]

PLAY [Initialize etcd host variables] ******************************************

TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]

PLAY [Determine openshift_version to configure on first master] ****************

TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [include_role] ************************************************************
TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] ***
ok: [node01]
TASK [openshift_version : Set openshift_version to openshift_release if undefined] ***
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
ok: [node01] => { "msg": "openshift_pkg_version was not defined. Falling back to -3.10.0" }
TASK [openshift_version : set_fact] ********************************************
ok: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : assert openshift_release in openshift_image_tag] *****
ok: [node01] => { "changed": false, "msg": "All assertions passed" }
TASK [openshift_version : assert openshift_release in openshift_pkg_version] ***
ok: [node01] => { "changed": false, "msg": "All assertions passed" }
TASK [openshift_version : debug] ***********************************************
ok: [node01] => { "openshift_release": "3.10" }
TASK [openshift_version : debug] ***********************************************
ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" }
TASK [openshift_version : debug] ***********************************************
ok: [node01] => { "openshift_pkg_version": "-3.10.0*" }
TASK [openshift_version : debug] ***********************************************
ok: [node01] => { "openshift_version": "3.10.0" }
TASK [set openshift_version booleans (first master)] ***************************
ok: [node01]

PLAY [Set openshift_version for etcd, node, and master hosts] ******************

TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [set_fact] ****************************************************************
ok: [node02]
TASK [set openshift_version booleans (masters and nodes)] **********************
ok: [node02]

PLAY [Verify Requirements] *****************************************************

TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [Run variable sanity checks] **********************************************
ok: [node01]
TASK [Validate openshift_node_groups and openshift_node_group_name] ************
ok: [node01]

PLAY [Initialization Checkpoint End] *******************************************

TASK [Set install initialization 'Complete'] ***********************************
ok: [node01]

PLAY [Validate node hostnames] *************************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [Query DNS for IP address of node02] **************************************
ok: [node02]
TASK [Validate openshift_hostname when defined] ********************************
skipping: [node02]
TASK [Validate openshift_ip exists on node when defined] ***********************
skipping: [node02]
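The openshift_pkg_version fallback of "-3.10.0*" seen in the debug output above is a yum name glob; it is what later resolves origin-node and origin-clients to the 3.10.0 builds. A sketch of how such a wildcard pin behaves (illustrative; the playbook drives this through the yum module rather than the CLI):

    # Install packages pinned by the "-3.10.0*" version glob and show
    # what the glob resolved to.
    pkg_version='-3.10.0*'
    yum install -y "origin-node${pkg_version}" "origin-clients${pkg_version}"
    rpm -q origin-node origin-clients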
PLAY [Configure os_firewall] ***************************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [os_firewall : Detecting Atomic Host Operating System] ********************
ok: [node02]
TASK [os_firewall : Set fact r_os_firewall_is_atomic] **************************
ok: [node02]
TASK [os_firewall : Fail - Firewalld is not supported on Atomic Host] **********
skipping: [node02]
TASK [os_firewall : Install firewalld packages] ********************************
skipping: [node02]
TASK [os_firewall : Ensure iptables services are not enabled] ******************
skipping: [node02] => (item=iptables)
skipping: [node02] => (item=ip6tables)
TASK [os_firewall : Wait 10 seconds after disabling iptables] ******************
skipping: [node02]
TASK [os_firewall : Start and enable firewalld service] ************************
skipping: [node02]
TASK [os_firewall : need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail] ***
skipping: [node02]
TASK [os_firewall : Restart polkitd] *******************************************
skipping: [node02]
TASK [os_firewall : Wait for polkit action to have been created] ***************
skipping: [node02]
TASK [os_firewall : Ensure firewalld service is not enabled] *******************
ok: [node02]
TASK [os_firewall : Wait 10 seconds after disabling firewalld] *****************
skipping: [node02]
TASK [os_firewall : Install iptables packages] *********************************
ok: [node02] => (item=iptables)
ok: [node02] => (item=iptables-services)
TASK [os_firewall : Start and enable iptables service] *************************
ok: [node02 -> node02] => (item=node02)
TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] ***
skipping: [node02]

PLAY [oo_nodes_to_config] ******************************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [container_runtime : Setup the docker-storage for overlay] ****************
skipping: [node02]
TASK [container_runtime : Create file system on extra volume device] ***********
TASK [container_runtime : Create mount entry for extra volume] *****************

PLAY [oo_nodes_to_config] ******************************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [openshift_excluder : Install docker excluder - yum] **********************
ok: [node02]
TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]
TASK [openshift_excluder : set_fact] *******************************************
ok: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : Enable openshift excluder] **************************
skipping: [node02]
TASK [container_runtime : Getting current systemd-udevd exec command] **********
skipping: [node02]
TASK [container_runtime : Assure systemd-udevd.service.d directory exists] *****
skipping: [node02]
TASK [container_runtime : Create systemd-udevd override file] ******************
skipping: [node02]
TASK [container_runtime : Add enterprise registry, if necessary] ***************
skipping: [node02]
TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ******************
skipping: [node02]
TASK [container_runtime : Add https_proxy to /etc/atomic.conf] *****************
skipping: [node02]
TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ********************
skipping: [node02]
TASK [container_runtime : Get current installed Docker version] ****************
ok: [node02]
TASK [container_runtime : Error out if Docker pre-installed but too old] *******
skipping: [node02]
TASK [container_runtime : Error out if requested Docker is too old] ************
skipping: [node02]
TASK [container_runtime : Install Docker] **************************************
skipping: [node02]
TASK [container_runtime : Ensure docker.service.d directory exists] ************
ok: [node02]
TASK [container_runtime : Configure Docker service unit file] ******************
ok: [node02]
TASK [container_runtime : stat] ************************************************
ok: [node02]
TASK [container_runtime : Set registry params] *********************************
skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []})
skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []})
skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []})
TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] ***
skipping: [node02]
TASK [container_runtime : Set Proxy Settings] **********************************
skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''})
skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''})
skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''})
TASK [container_runtime : Set various Docker options] **************************
ok: [node02]
TASK [container_runtime : stat] ************************************************
ok: [node02]
TASK [container_runtime : Configure Docker Network OPTIONS] ********************
ok: [node02]
TASK [container_runtime : Detect if docker is already started] *****************
ok: [node02]
TASK [container_runtime : Start the Docker service] ****************************
ok: [node02]
TASK [container_runtime : set_fact] ********************************************
ok: [node02]
TASK [container_runtime : Check for docker_storage_path/overlay2] **************
ok: [node02]
TASK [container_runtime : Fixup SELinux permissions for docker] ****************
changed: [node02]
TASK [container_runtime : Ensure /var/lib/containers exists] *******************
ok: [node02]
TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ******
ok: [node02]
TASK [container_runtime : Check for credentials file for registry auth] ********
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth] *****
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] ***
skipping: [node02]
TASK [container_runtime : stat the docker data dir] ****************************
ok: [node02]
TASK [container_runtime : stop the current running docker] *********************
skipping: [node02]
TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] ***
skipping: [node02]
TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] ***
skipping: [node02]
TASK [container_runtime : restorecon the /var/lib/containers/docker] ***********
skipping: [node02]
TASK [container_runtime : Remove the old docker location] **********************
skipping: [node02]
TASK [container_runtime : Setup the link] **************************************
skipping: [node02]
TASK [container_runtime : start docker] ****************************************
skipping: [node02]
TASK [container_runtime : Fail if Atomic Host since this is an rpm request] ****
skipping: [node02]
TASK [container_runtime : Getting current systemd-udevd exec command] **********
skipping: [node02]
TASK [container_runtime : Assure systemd-udevd.service.d directory exists] *****
skipping: [node02]
TASK [container_runtime : Create systemd-udevd override file] ******************
skipping: [node02]
TASK [container_runtime : Add enterprise registry, if necessary] ***************
skipping: [node02]
TASK [container_runtime : Check that overlay is in the kernel] *****************
skipping: [node02]
TASK [container_runtime : Add overlay to modprobe.d] ***************************
skipping: [node02]
TASK [container_runtime : Manually modprobe overlay into the kernel] ***********
skipping: [node02]
TASK [container_runtime : Enable and start systemd-modules-load] ***************
skipping: [node02]
TASK [container_runtime : Install cri-o] ***************************************
skipping: [node02]
TASK [container_runtime : Remove CRI-O default configuration files] ************
skipping: [node02] => (item=/etc/cni/net.d/200-loopback.conf)
skipping: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf)
TASK [container_runtime : Create the CRI-O configuration] **********************
skipping: [node02]
TASK [container_runtime : Ensure CNI configuration directory exists] ***********
skipping: [node02]
TASK [container_runtime : Add iptables allow rules] ****************************
skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'})
TASK [container_runtime : Remove iptables rules] *******************************
TASK [container_runtime : Add firewalld allow rules] ***************************
skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'})
TASK [container_runtime : Remove firewalld allow rules] ************************
TASK [container_runtime : Configure the CNI network] ***************************
skipping: [node02]
TASK [container_runtime : Create /etc/sysconfig/crio-network] ******************
skipping: [node02]
TASK [container_runtime : Start the CRI-O service] *****************************
skipping: [node02]
TASK [container_runtime : Ensure /var/lib/containers exists] *******************
skipping: [node02]
TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ******
skipping: [node02]
TASK [container_runtime : Check for credentials file for registry auth] ********
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth] *****
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] ***
skipping: [node02]
TASK [container_runtime : stat the docker data dir] ****************************
skipping: [node02]
TASK [container_runtime : stop the current running docker] *********************
skipping: [node02]
TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] ***
skipping: [node02]
TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] ***
skipping: [node02]
TASK [container_runtime : restorecon the /var/lib/containers/docker] ***********
skipping: [node02]
TASK [container_runtime : Remove the old docker location] **********************
skipping: [node02]
TASK [container_runtime : Setup the link] **************************************
skipping: [node02]
TASK [container_runtime : start docker] ****************************************
skipping: [node02]
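The entire CRI-O branch above is skipped because this job is not a -crio- variant (see the `[[ openshift-3.10-release =~ .*-crio-.* ]]` test at the top of the log). Had it run, the overlay tasks amount to a simple kernel-module check; a sketch of the equivalent shell, assuming the stock modules-load.d mechanism:

    # Roughly what the skipped overlay tasks would do: verify the overlay
    # filesystem is available, and if not, persist and load the module.
    if ! grep -qw overlay /proc/filesystems; then
        echo overlay > /etc/modules-load.d/overlay.conf   # persist across boots
        modprobe overlay                                  # load it now
    fi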
PLAY [Determine openshift_version to configure on first master] ****************

TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [include_role] ************************************************************
TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] ***
skipping: [node01]
TASK [openshift_version : Set openshift_version to openshift_release if undefined] ***
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : assert openshift_release in openshift_image_tag] *****
ok: [node01] => { "changed": false, "msg": "All assertions passed" }
TASK [openshift_version : assert openshift_release in openshift_pkg_version] ***
ok: [node01] => { "changed": false, "msg": "All assertions passed" }
TASK [openshift_version : debug] ***********************************************
ok: [node01] => { "openshift_release": "3.10" }
TASK [openshift_version : debug] ***********************************************
ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" }
TASK [openshift_version : debug] ***********************************************
ok: [node01] => { "openshift_pkg_version": "-3.10.0*" }
TASK [openshift_version : debug] ***********************************************
ok: [node01] => { "openshift_version": "3.10.0" }
TASK [set openshift_version booleans (first master)] ***************************
ok: [node01]

PLAY [Set openshift_version for etcd, node, and master hosts] ******************

TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [set_fact] ****************************************************************
ok: [node02]
TASK [set openshift_version booleans (masters and nodes)] **********************
ok: [node02]

PLAY [Node Preparation Checkpoint Start] ***************************************

TASK [Set Node preparation 'In Progress'] **************************************
ok: [node01]

PLAY [Only target nodes that have not yet been bootstrapped] *******************

TASK [Gathering Facts] *********************************************************
ok: [localhost]
TASK [add_host] ****************************************************************
skipping: [localhost] => (item=node02)
ok: [localhost] => (item=node01)

PLAY [Disable excluders] *******************************************************

TASK [openshift_excluder : Detecting Atomic Host Operating System] *************
ok: [node02]
TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] ***
ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true }
TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] ***
ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true }
TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] ***
skipping: [node02]
TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] ***
skipping: [node02]
TASK [openshift_excluder : Include main action task file] **********************
included: /root/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02
TASK [openshift_excluder : Get available excluder version] *********************
skipping: [node02]
TASK [openshift_excluder : Fail when excluder package is not found] ************
skipping: [node02]
TASK [openshift_excluder : Set fact excluder_version] **************************
skipping: [node02]
TASK [openshift_excluder : origin-docker-excluder version detected] ************
skipping: [node02]
TASK [openshift_excluder : Printing upgrade target version] ********************
skipping: [node02]
TASK [openshift_excluder : Check the available origin-docker-excluder version is at most of the upgrade target version] ***
skipping: [node02]
TASK [openshift_excluder : Get available excluder version] *********************
skipping: [node02]
TASK [openshift_excluder : Fail when excluder package is not found] ************
skipping: [node02]
TASK [openshift_excluder : Set fact excluder_version] **************************
skipping: [node02]
TASK [openshift_excluder : origin-excluder version detected] *******************
skipping: [node02]
TASK [openshift_excluder : Printing upgrade target version] ********************
skipping: [node02]
TASK [openshift_excluder : Check the available origin-excluder version is at most of the upgrade target version] ***
skipping: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : disable docker excluder] ****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : disable openshift excluder] *************************
changed: [node02]
TASK [openshift_excluder : Install docker excluder - yum] **********************
skipping: [node02]
TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]
TASK [openshift_excluder : set_fact] *******************************************
skipping: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : Enable openshift excluder] **************************
changed: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : disable docker excluder] ****************************
skipping: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : disable openshift excluder] *************************
changed: [node02]
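The enable/disable churn above is the role toggling yum excludes around install steps: excludes are lifted so pinned packages can be installed or updated, then restored. A sketch of the direct invocation these tasks wrap; the /sbin/origin-docker-excluder script name and the exclude/unexclude verbs are assumptions based on how the excluder packages are commonly laid out, not something this log confirms:

    # Hypothetical manual excluder toggle around a docker update.
    /sbin/origin-docker-excluder unexclude   # lift the yum exclude
    yum install -y docker                    # install/update while unpinned
    /sbin/origin-docker-excluder exclude     # re-protect the pinned version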
PLAY [Configure nodes] *********************************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [openshift_cloud_provider : Set cloud provider facts] *********************
skipping: [node02]
TASK [openshift_cloud_provider : Create cloudprovider config dir] **************
skipping: [node02]
TASK [openshift_cloud_provider : include the defined cloud provider files] *****
skipping: [node02]
TASK [openshift_node : fail] ***************************************************
skipping: [node02]
TASK [openshift_node : Check for NetworkManager service] ***********************
ok: [node02]
TASK [openshift_node : Set fact using_network_manager] *************************
ok: [node02]
TASK [openshift_node : Install dnsmasq] ****************************************
ok: [node02]
TASK [openshift_node : ensure origin/node directory exists] ********************
changed: [node02] => (item=/etc/origin)
changed: [node02] => (item=/etc/origin/node)
TASK [openshift_node : Install NetworkManager during node_bootstrap provisioning] ***
skipping: [node02]
TASK [openshift_node : Install network manager dispatch script] ****************
skipping: [node02]
TASK [openshift_node : Install dnsmasq configuration] **************************
ok: [node02]
TASK [openshift_node : Deploy additional dnsmasq.conf] *************************
skipping: [node02]
TASK [openshift_node : Enable dnsmasq] *****************************************
ok: [node02]
TASK [openshift_node : Install network manager dispatch script] ****************
ok: [node02]
TASK [openshift_node : Add iptables allow rules] *******************************
ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'})
ok: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'})
ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'})
ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'})
ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'})
skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'})
skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'})
skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'})
TASK [openshift_node : Remove iptables rules] **********************************
TASK [openshift_node : Add firewalld allow rules] ******************************
skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'})
skipping: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'})
skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'})
skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'})
skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'})
skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'})
skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'})
skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'})
TASK [openshift_node : Remove firewalld allow rules] ***************************
TASK [openshift_node : Checking for journald.conf] *****************************
ok: [node02]
TASK [openshift_node : Create journald persistence directories] ****************
ok: [node02]
TASK [openshift_node : Update journald setup] **********************************
ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'})
ok: [node02] => (item={u'var': u'Compress', u'val': True})
ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'})
ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'})
ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000})
ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'})
ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'})
ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'})
ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'})
ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'})
ok: [node02] => (item={u'var': u'ForwardToSyslog', u'val': False})
ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False})
TASK [openshift_node : Restart journald] ***************************************
skipping: [node02]
TASK [openshift_node : Disable swap] *******************************************
ok: [node02]
TASK [openshift_node : Install node, clients, and conntrack packages] **********
ok: [node02] => (item={u'name': u'origin-node-3.10.0*'})
ok: [node02] => (item={u'name': u'origin-clients-3.10.0*'})
ok: [node02] => (item={u'name': u'conntrack-tools'})
TASK [openshift_node : Restart cri-o] ******************************************
skipping: [node02]
TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] ***
changed: [node02]
TASK [openshift_node : sysctl] *************************************************
ok: [node02]
TASK [openshift_node : Check for credentials file for registry auth] ***********
skipping: [node02]
TASK [openshift_node : Create credentials for registry auth] *******************
skipping: [node02]
TASK [openshift_node : Create credentials for registry auth (alternative)] *****
skipping: [node02]
TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] ***
skipping: [node02]
TASK [openshift_node : Check that node image is present] ***********************
changed: [node02]
TASK [openshift_node : Pre-pull node image] ************************************
skipping: [node02]
TASK [openshift_node : Copy node script to the node] ***************************
ok: [node02]
TASK [openshift_node : Install Node service file] ******************************
ok: [node02]
TASK [openshift_node : Ensure old system path is set] **************************
skipping: [node02] => (item=/etc/origin/openvswitch)
skipping: [node02] => (item=/var/lib/kubelet)
skipping: [node02] => (item=/opt/cni/bin)
TASK [openshift_node : Check status of node image pre-pull] ********************
skipping: [node02]
TASK [openshift_node : Copy node container image to ostree storage] ************
skipping: [node02]
TASK [openshift_node : Install or Update node system container] ****************
skipping: [node02]
TASK [openshift_node : Restart network manager to ensure networking configuration is in place] ***
skipping: [node02]
TASK [openshift_node : Configure Node settings] ********************************
ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS='})
ok: [node02] => (item={u'regex': u'^DEBUG_LOGLEVEL=', u'line': u'DEBUG_LOGLEVEL=2'})
ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.10.0-rc.0'})
TASK [openshift_node : Configure Proxy Settings] *******************************
skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='})
skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='})
skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'})
TASK [openshift_node : file] ***************************************************
skipping: [node02]
TASK [openshift_node : Create the Node config] *********************************
changed: [node02]
TASK [openshift_node : Configure Node Environment Variables] *******************
TASK [openshift_node : Ensure the node static pod directory exists] ************
changed: [node02]
TASK [openshift_node : Configure AWS Cloud Provider Settings] ******************
skipping: [node02] => (item=None)
skipping: [node02] => (item=None)
skipping: [node02]
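The journald items applied in "Update journald setup" above map one-for-one onto journald configuration keys. An equivalent standalone configuration, sketched as a here-doc (the role edits individual keys in place with lineinfile; overwriting the stock /etc/systemd/journald.conf wholesale is a simplification here):

    # Same journald settings as the task output above, written directly.
    cat > /etc/systemd/journald.conf <<'EOF'
    [Journal]
    Storage=persistent
    Compress=yes
    SyncIntervalSec=1s
    RateLimitInterval=1s
    RateLimitBurst=10000
    SystemMaxUse=8G
    SystemKeepFree=20%
    SystemMaxFileSize=10M
    MaxRetentionSec=1month
    MaxFileSec=1day
    ForwardToSyslog=no
    ForwardToWall=no
    EOF
    systemctl restart systemd-journald   # the log shows this restart skipped (no change needed)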
TASK [openshift_node : Check status of node image pre-pull] ********************
skipping: [node02]
TASK [openshift_node : Install NFS storage plugin dependencies] ****************
ok: [node02]
TASK [openshift_node : Check for existence of nfs sebooleans] ******************
ok: [node02] => (item=virt_use_nfs)
ok: [node02] => (item=virt_sandbox_use_nfs)
TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] ***
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-26 12:02:44.918679', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.016315', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-26 12:02:44.902364', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-26 12:02:46.030652', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.016347', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-26 12:02:46.014305', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] ***
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-26 12:02:44.918679', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.016315', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-26 12:02:44.902364', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-26 12:02:46.030652', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.016347', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-26 12:02:46.014305', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Install GlusterFS storage plugin dependencies] **********
ok: [node02]
TASK [openshift_node : Check for existence of fusefs sebooleans] ***************
ok: [node02] => (item=virt_use_fusefs)
ok: [node02] => (item=virt_sandbox_use_fusefs)
TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] ***
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-26 12:02:52.258679', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.015944', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-26 12:02:52.242735', '_ansible_ignore_errors': None, 'failed': False})
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-26 12:02:53.473941', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.016158', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-26 12:02:53.457783', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] ***
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-26 12:02:52.258679', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.015944', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-26 12:02:52.242735', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-26 12:02:53.473941', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.016158', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-26 12:02:53.457783', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Install Ceph storage plugin dependencies] ***************
ok: [node02]
TASK [openshift_node : Install iSCSI storage plugin dependencies] **************
ok: [node02] => (item=iscsi-initiator-utils)
ok: [node02] => (item=device-mapper-multipath)
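The seboolean tasks above gate on the captured getsebool output ("virt_use_nfs --> on" and friends) and only change booleans that are off. The same idempotent check-then-set, sketched in shell:

    # Mirror the seboolean handling above: setsebool -P (persistent, and
    # slow because it rebuilds policy) runs only when the boolean is off.
    for sebool in virt_use_nfs virt_sandbox_use_nfs virt_use_fusefs virt_sandbox_use_fusefs; do
        if ! getsebool "$sebool" | grep -q 'on$'; then
            setsebool -P "$sebool" on
        fi
    done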
restart services] *************************************** ok: [node02] => (item=multipathd) ok: [node02] => (item=rpcbind) ok: [node02] => (item=iscsid) TASK [openshift_node : Template multipath configuration] *********************** changed: [node02] TASK [openshift_node : Enable and start multipath] ***************************** changed: [node02] TASK [tuned : Check for tuned package] ***************************************** ok: [node02] TASK [tuned : Set tuned OpenShift variables] *********************************** ok: [node02] TASK [tuned : Ensure directory structure exists] ******************************* ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : 
Ensure files are populated from templates] *********************** skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Make tuned use the recommended tuned profile on restart] ********* changed: [node02] => (item=/etc/tuned/active_profile) changed: [node02] => (item=/etc/tuned/profile_mode) TASK [tuned : Restart tuned service] ******************************************* changed: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] ******* ok: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] *** PLAY [node bootstrap config] *************************************************** TASK [Gathering Facts] 
********************************************************* ok: [node02] TASK [openshift_node : install needed rpm(s)] ********************************** ok: [node02] => (item=origin-node) ok: [node02] => (item=origin-docker-excluder) ok: [node02] => (item=ansible) ok: [node02] => (item=bash-completion) ok: [node02] => (item=docker) ok: [node02] => (item=haproxy) ok: [node02] => (item=dnsmasq) ok: [node02] => (item=ntp) ok: [node02] => (item=logrotate) ok: [node02] => (item=httpd-tools) ok: [node02] => (item=bind-utils) ok: [node02] => (item=firewalld) ok: [node02] => (item=libselinux-python) ok: [node02] => (item=conntrack-tools) ok: [node02] => (item=openssl) ok: [node02] => (item=iproute) ok: [node02] => (item=python-dbus) ok: [node02] => (item=PyYAML) ok: [node02] => (item=yum-utils) ok: [node02] => (item=glusterfs-fuse) ok: [node02] => (item=device-mapper-multipath) ok: [node02] => (item=nfs-utils) ok: [node02] => (item=cockpit-ws) ok: [node02] => (item=cockpit-system) ok: [node02] => (item=cockpit-bridge) ok: [node02] => (item=cockpit-docker) ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=ceph-common) TASK [openshift_node : create the directory for node] ************************** skipping: [node02] TASK [openshift_node : laydown systemd override] ******************************* skipping: [node02] TASK [openshift_node : update the sysconfig to have necessary variables] ******* ok: [node02] => (item={u'regexp': u'^KUBECONFIG=.*', u'line': u'KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig'}) TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : disable origin-node service] **************************** changed: [node02] => (item=origin-node.service) TASK [openshift_node : Check for RPM generated config marker file .config_managed] *** ok: [node02] TASK [openshift_node : create directories for bootstrapping] ******************* ok: [node02] => (item=/root/openshift_bootstrap) changed: [node02] => (item=/var/lib/origin/openshift.local.config) changed: [node02] => (item=/var/lib/origin/openshift.local.config/node) ok: [node02] => (item=/etc/docker/certs.d/docker-registry.default.svc:5000) TASK [openshift_node : laydown the bootstrap.yml file for on boot configuration] *** ok: [node02] TASK [openshift_node : Create a symlink to the node client CA for the docker registry] *** ok: [node02] TASK [openshift_node : Remove RPM generated config files if present] *********** skipping: [node02] => (item=master) skipping: [node02] => (item=.config_managed) TASK [openshift_node : find all files in /etc/origin/node so we can remove them] *** skipping: [node02] TASK [openshift_node : Remove everything except the resolv.conf required for node] *** skipping: [node02] TASK [openshift_node_group : create node config template] ********************** changed: [node02] TASK [openshift_node_group : remove existing node config] ********************** changed: [node02] TASK [openshift_node_group : Ensure required directories are present] ********** ok: [node02] => (item=/etc/origin/node/pods) changed: [node02] => (item=/etc/origin/node/certificates) TASK [openshift_node_group : Update the sysconfig to group "node-config-compute"] *** changed: [node02] TASK [set_fact] **************************************************************** ok: [node02] PLAY [Re-enable excluder if it was previously enabled] ************************* TASK [openshift_excluder : Detecting 
Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] PLAY [Node Preparation Checkpoint End] ***************************************** TASK [Set Node preparation 'Complete'] ***************************************** ok: [node01] PLAY [Distribute bootstrap and start nodes] ************************************ TASK [openshift_node : Gather node information] ******************************** changed: [node02] ok: [node01] TASK [openshift_node : Copy master bootstrap config locally] ******************* ok: [node02] TASK [openshift_node : Distribute bootstrap kubeconfig if one does not exist] *** ok: [node01] changed: [node02] TASK [openshift_node : Start and enable node for bootstrapping] **************** changed: [node02] changed: [node01] TASK [openshift_node : Get node logs] ****************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : debug] ************************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : fail] *************************************************** skipping: [node02] skipping: [node01] PLAY [Approve any pending CSR requests from inventory nodes] ******************* TASK [Dump all candidate bootstrap hostnames] ********************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Find all hostnames for bootstrapping] ************************************ ok: [node01] TASK [Dump the bootstrap hostnames] ******************************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Approve bootstrap nodes] ************************************************* changed: [node01] TASK [Get CSRs] **************************************************************** skipping: [node01] TASK [Report approval errors] ************************************************** skipping: [node01] PLAY [Ensure any inventory labels are applied to the nodes] ******************** TASK [Gathering Facts] 
********************************************************* ok: [node02] ok: [node01] TASK [openshift_manage_node : Wait for master API to become available before proceeding] *** skipping: [node02] TASK [openshift_manage_node : Wait for Node Registration] ********************** ok: [node01 -> node01] ok: [node02 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02, node01 TASK [openshift_manage_node : Set node schedulability] ************************* ok: [node02 -> node01] ok: [node01 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/set_default_node_role.yml for node02, node01 TASK [openshift_manage_node : Retrieve nodes that are marked with the infra selector or the legacy infra selector] *** ok: [node02 -> node01] TASK [openshift_manage_node : Label infra or legacy infra nodes with the new role label] *** TASK [openshift_manage_node : Retrieve non-infra, non-master nodes that are not yet labeled compute] *** ok: [node02 -> node01] TASK [openshift_manage_node : label non-master non-infra nodes compute] ******** TASK [openshift_manage_node : Label all-in-one master as a compute node] ******* skipping: [node02] PLAY RECAP ********************************************************************* localhost : ok=30 changed=0 unreachable=0 failed=0 node01 : ok=71 changed=3 unreachable=0 failed=0 node02 : ok=155 changed=33 unreachable=0 failed=0 INSTALLER STATUS *************************************************************** Initialization : Complete (0:03:37) Node Preparation : Complete (0:04:09) Sending file modes: C0755 110489328 oc Sending file modes: C0600 5645 admin.kubeconfig Cluster "node01:8443" set. Cluster "node01:8443" set. + set +e + kubectl get nodes --no-headers + cluster/kubectl.sh get nodes --no-headers node01 Ready compute,infra,master 4d v1.10.0+b81c8f8 node02 Ready compute 1m v1.10.0+b81c8f8 + kubectl_rc=0 + '[' 0 -ne 0 ']' ++ kubectl get nodes --no-headers ++ cluster/kubectl.sh get nodes --no-headers ++ grep NotReady + '[' -n '' ']' + set -e + echo 'Nodes are ready:' Nodes are ready: + kubectl get nodes + cluster/kubectl.sh get nodes NAME STATUS ROLES AGE VERSION node01 Ready compute,infra,master 4d v1.10.0+b81c8f8 node02 Ready compute 1m v1.10.0+b81c8f8 + make cluster-sync ./cluster/build.sh Building ... sha256:2df8b30e8f619e28e75e00ea9fa42c63f4f14b1c34fbb1223214102337507863 go version go1.10 linux/amd64 go version go1.10 linux/amd64 make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh sha256:2df8b30e8f619e28e75e00ea9fa42c63f4f14b1c34fbb1223214102337507863 go version go1.10 linux/amd64 go version go1.10 linux/amd64 find: '/root/go/src/kubevirt.io/kubevirt/_out/cmd': No such file or directory Compiling tests... 
compiled tests.test hack/build-docker.sh build Sending build context to Docker daemon 36.24 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 6af39ea33818 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> 45ed71cd684b Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> ba8171a31e93 Step 5/8 : USER 1001 ---> Using cache ---> 6bd535be1fa1 Step 6/8 : COPY virt-controller /usr/bin/virt-controller ---> 0f2b19033618 Removing intermediate container 0e13fa7b0482 Step 7/8 : ENTRYPOINT /usr/bin/virt-controller ---> Running in 16884c4e1630 ---> 558671af22d0 Removing intermediate container 16884c4e1630 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-controller" '' ---> Running in cd05a109869c ---> 4dc62ed937ce Removing intermediate container cd05a109869c Successfully built 4dc62ed937ce Sending build context to Docker daemon 38.19 MB Step 1/10 : FROM kubevirt/libvirt:3.7.0 ---> 60c80c8f7523 Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 3bbd31ef6597 Step 3/10 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool net-tools sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> 7fca7eb9d4da Step 4/10 : COPY virt-launcher /usr/bin/virt-launcher ---> f4830a9281f9 Removing intermediate container c1d322ad484c Step 5/10 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> d11b929e861d Removing intermediate container bf931652a7cf Step 6/10 : RUN setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64 ---> Running in 63fb4a1662ff  ---> 57b8cc00fe3e Removing intermediate container 63fb4a1662ff Step 7/10 : RUN mkdir -p /usr/share/kubevirt/virt-launcher ---> Running in a1c470bd8337  ---> 814d84c92970 Removing intermediate container a1c470bd8337 Step 8/10 : COPY entrypoint.sh libvirtd.sh sh.sh sock-connector /usr/share/kubevirt/virt-launcher/ ---> 693cc3a4d3c4 Removing intermediate container 73bea9e8a5a2 Step 9/10 : ENTRYPOINT /usr/share/kubevirt/virt-launcher/entrypoint.sh ---> Running in 4a249b589584 ---> e96ae1590ddb Removing intermediate container 4a249b589584 Step 10/10 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-launcher" '' ---> Running in 3d57a3ff62b5 ---> a7cb3e7a5951 Removing intermediate container 3d57a3ff62b5 Successfully built a7cb3e7a5951 Sending build context to Docker daemon 39.56 MB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 6af39ea33818 Step 3/5 : COPY virt-handler /usr/bin/virt-handler ---> d861d8d8766e Removing intermediate container 95d1d2d8d40e Step 4/5 : ENTRYPOINT /usr/bin/virt-handler ---> Running in 33f9b0895e5b ---> 1019d9be3bc0 Removing intermediate container 33f9b0895e5b Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-handler" '' ---> Running in 047979b4fd86 ---> c9a2d183249c Removing intermediate container 047979b4fd86 Successfully built c9a2d183249c Sending build context to Docker daemon 37.01 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 6af39ea33818 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api ---> Using cache ---> 12e3c00eb78f Step 4/8 : WORKDIR /home/virt-api ---> Using cache ---> cfb92cbbf126 Step 5/8 : USER 1001 ---> Using cache ---> f02f77c7a4fc Step 6/8 : COPY virt-api /usr/bin/virt-api ---> b723e46c6bde Removing 
intermediate container 334d723081e9 Step 7/8 : ENTRYPOINT /usr/bin/virt-api ---> Running in 96a63e399d67 ---> 775e46fb1955 Removing intermediate container 96a63e399d67 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-api" '' ---> Running in 0197ac8a027c ---> 685acbe58599 Removing intermediate container 0197ac8a027c Successfully built 685acbe58599 Sending build context to Docker daemon 4.096 kB Step 1/7 : FROM fedora:27 ---> 9110ae7f579f Step 2/7 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 6af39ea33818 Step 3/7 : ENV container docker ---> Using cache ---> 1211fd5eb075 Step 4/7 : RUN mkdir -p /images/custom /images/alpine && truncate -s 64M /images/custom/disk.img && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/alpine/disk.img ---> Using cache ---> ac806f8eae52 Step 5/7 : ADD entrypoint.sh / ---> Using cache ---> e31eeb9c22c5 Step 6/7 : CMD /entrypoint.sh ---> Using cache ---> ecb35f794669 Step 7/7 : LABEL "disks-images-provider" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Running in aa34a09365a8 ---> 2fd2741b8bdc Removing intermediate container aa34a09365a8 Successfully built 2fd2741b8bdc Sending build context to Docker daemon 2.56 kB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 6af39ea33818 Step 3/5 : ENV container docker ---> Using cache ---> 1211fd5eb075 Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> 7b90d68258cd Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "vm-killer" '' ---> Running in 034abacecef0 ---> 2784c2e3a8f2 Removing intermediate container 034abacecef0 Successfully built 2784c2e3a8f2 Sending build context to Docker daemon 5.12 kB Step 1/7 : FROM debian:sid ---> 4817bb6590f8 Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> b8b166db2544 Step 3/7 : ENV container docker ---> Using cache ---> 8b120f56086f Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> 61851ac93c11 Step 5/7 : ADD entry-point.sh / ---> Using cache ---> ada85930060d Step 6/7 : CMD /entry-point.sh ---> Using cache ---> 6f2ffb0e7aed Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "registry-disk-v1alpha" '' ---> Running in 20f27f765cdc ---> 0d8d7526c858 Removing intermediate container 20f27f765cdc Successfully built 0d8d7526c858 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32801/kubevirt/registry-disk-v1alpha:devel ---> 0d8d7526c858 Step 2/4 : MAINTAINER "David Vossel" \ ---> Running in d91289c5eae0 ---> 041bb7a77c53 Removing intermediate container d91289c5eae0 Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Running in a66202bededb [curl progress meter trimmed; final: 100% of 12.1M in 0:00:09, avg 1305k/s] ---> e3a266f8e14b Removing intermediate container a66202bededb Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Running in a14511099e26 ---> f82bb834e5e2 Removing intermediate container a14511099e26 Successfully built f82bb834e5e2 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32801/kubevirt/registry-disk-v1alpha:devel ---> 0d8d7526c858 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Running in 759fed4dd186 ---> cb59e78d73e4 Removing intermediate container 759fed4dd186 Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2 ---> Running in 56c928846720 [curl progress meter trimmed; final: 100% of 221M in 0:01:56, avg 1941k/s] ---> ca16734d7b7d Removing intermediate container 56c928846720 Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Running in 9e2a3664def3 ---> 27845b53190b Removing intermediate container 9e2a3664def3 Successfully built 27845b53190b Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32801/kubevirt/registry-disk-v1alpha:devel ---> 0d8d7526c858 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> cb59e78d73e4 Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso ---> Running in 2c4a426a9629 [curl progress meter trimmed; final: 100% of 37.0M in 0:00:02, avg 14.9M/s] ---> 9c869e4eb1b4 Removing intermediate container 2c4a426a9629 Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Running in c7baf8bc41a7 ---> 26e11baced51 Removing intermediate container c7baf8bc41a7 Successfully built 26e11baced51 Sending build context to Docker daemon 34.04 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 6af39ea33818 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl ---> Using cache ---> 62cf8151a5f3 Step 4/8 : WORKDIR /home/virtctl ---> Using cache ---> 7df4da9e1b5d Step 5/8 : USER 1001 ---> Using cache ---> 3ee421ac4ad4 Step 6/8 : COPY subresource-access-test /subresource-access-test ---> 7e64124a7df6 Removing intermediate container 65ab00394485 Step 7/8 : ENTRYPOINT /subresource-access-test ---> Running in e55c04515027 ---> ade4aaef8658 Removing intermediate container e55c04515027 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "subresource-access-test" '' ---> Running in e5f92feaeac4 ---> 4d2400c7902a Removing intermediate container e5f92feaeac4 Successfully built 4d2400c7902a Sending build context to Docker daemon 3.072 kB Step 1/9 : FROM fedora:27 ---> 9110ae7f579f Step 2/9 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 6af39ea33818 Step 3/9 : ENV container docker ---> Using cache ---> 1211fd5eb075 Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all ---> Using cache ---> 7ff1a45e3635 Step 5/9 : ENV GIMME_GO_VERSION 1.9.2 ---> Using cache ---> a05ebaed4a0f Step 6/9 : RUN mkdir -p /gimme && curl -sL
https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> cd8398be9593 Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 71c7ecd55e24 Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli ---> Using cache ---> 9689e3184427 Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "winrmcli" '' ---> Running in 4423ecaa1a23 ---> 5ec227208a87 Removing intermediate container 4423ecaa1a23 Successfully built 5ec227208a87 hack/build-docker.sh push The push refers to a repository [localhost:32801/kubevirt/virt-controller] f948e0fb2336: Preparing c0d2c4546d78: Preparing 39bae602f753: Preparing c0d2c4546d78: Pushed f948e0fb2336: Pushed 39bae602f753: Pushed devel: digest: sha256:05f4b3b228524af4b8ff1b0801924e303e12ab4edf27b9a8b90a7c374c90ba37 size: 948 The push refers to a repository [localhost:32801/kubevirt/virt-launcher] 484f37dcdab6: Preparing 903836327d7d: Preparing 28c045626d35: Preparing e3e2f279b468: Preparing cfc148b539be: Preparing fa30d8d5eeb1: Preparing 530cc55618cd: Preparing 34fa414dfdf6: Preparing a1359dc556dd: Preparing 490c7c373332: Preparing 4b440db36f72: Preparing 39bae602f753: Preparing fa30d8d5eeb1: Waiting 530cc55618cd: Waiting 34fa414dfdf6: Waiting 39bae602f753: Waiting 490c7c373332: Waiting a1359dc556dd: Waiting 903836327d7d: Pushed 484f37dcdab6: Pushed e3e2f279b468: Pushed 530cc55618cd: Pushed 34fa414dfdf6: Pushed 490c7c373332: Pushed a1359dc556dd: Pushed 39bae602f753: Mounted from kubevirt/virt-controller 28c045626d35: Pushed cfc148b539be: Pushed fa30d8d5eeb1: Pushed 4b440db36f72: Pushed devel: digest: sha256:2933eccfb3014570268a4dfec46781c90d1901f68821061f7666381b1dc1841c size: 2828 The push refers to a repository [localhost:32801/kubevirt/virt-handler] ebf68bc1d802: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-launcher ebf68bc1d802: Pushed devel: digest: sha256:654850868d5cb7f73d15b873185f79fce10fccd03d8eb6ae33b0e0feca786a80 size: 741 The push refers to a repository [localhost:32801/kubevirt/virt-api] e620c35853be: Preparing ae4970287372: Preparing 39bae602f753: Preparing 39bae602f753: Waiting 39bae602f753: Mounted from kubevirt/virt-handler ae4970287372: Pushed e620c35853be: Pushed devel: digest: sha256:4b99e09c57bb33106896440111f96eb10245e7220ae2a68d386f0925d705791a size: 948 The push refers to a repository [localhost:32801/kubevirt/disks-images-provider] 5c28b30e6fcd: Preparing 153871b39e50: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-api 5c28b30e6fcd: Pushed 153871b39e50: Pushed devel: digest: sha256:c7d1a600791cc8cc912cd5571c74ac21bf500d8df7a5db1df8f5e4579589149a size: 948 The push refers to a repository [localhost:32801/kubevirt/vm-killer] e3afff5758ce: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/disks-images-provider e3afff5758ce: Pushed devel: digest: sha256:342dea10af4b89870e7b8a8723cf5ab613941a7bf6e0eeed42cea96559d5f25d size: 740 The push refers to a repository [localhost:32801/kubevirt/registry-disk-v1alpha] 376d512574a4: Preparing 7971c2f81ae9: Preparing e7752b410e4c: Preparing 376d512574a4: Pushed 7971c2f81ae9: Pushed e7752b410e4c: Pushed devel: digest: sha256:96d402fbce9e63898799cf82964a82634e66f3103666f37ce059415083c116a3 size: 948 The push refers to a repository [localhost:32801/kubevirt/cirros-registry-disk-demo] 4fe2d29fc704: Preparing 376d512574a4: Preparing 7971c2f81ae9: Preparing e7752b410e4c: Preparing 376d512574a4: 
Waiting 7971c2f81ae9: Waiting e7752b410e4c: Waiting 376d512574a4: Mounted from kubevirt/registry-disk-v1alpha 7971c2f81ae9: Mounted from kubevirt/registry-disk-v1alpha e7752b410e4c: Mounted from kubevirt/registry-disk-v1alpha 4fe2d29fc704: Pushed devel: digest: sha256:6ed77f8619833b11c074fecafe86c91f5b130e28128e04718627e8b1d52713cf size: 1160 The push refers to a repository [localhost:32801/kubevirt/fedora-cloud-registry-disk-demo] b350977c0199: Preparing 376d512574a4: Preparing 7971c2f81ae9: Preparing e7752b410e4c: Preparing e7752b410e4c: Mounted from kubevirt/cirros-registry-disk-demo 376d512574a4: Mounted from kubevirt/cirros-registry-disk-demo 7971c2f81ae9: Mounted from kubevirt/cirros-registry-disk-demo b350977c0199: Pushed devel: digest: sha256:6db49e4605f8a0d0d8d03a8a72af9e7f4aadeab0e3b4b0a0857bb876b2df70bb size: 1161 The push refers to a repository [localhost:32801/kubevirt/alpine-registry-disk-demo] 160479230abb: Preparing 376d512574a4: Preparing 7971c2f81ae9: Preparing e7752b410e4c: Preparing e7752b410e4c: Mounted from kubevirt/fedora-cloud-registry-disk-demo 376d512574a4: Mounted from kubevirt/fedora-cloud-registry-disk-demo 7971c2f81ae9: Mounted from kubevirt/fedora-cloud-registry-disk-demo 160479230abb: Pushed devel: digest: sha256:679ba9b394266d58ff0a3caff6b1e102c658b5e53a086051136c5d4b9e7a63ff size: 1160 The push refers to a repository [localhost:32801/kubevirt/subresource-access-test] 6397ade657a4: Preparing 2aaca144a3e2: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/vm-killer 2aaca144a3e2: Pushed 6397ade657a4: Pushed devel: digest: sha256:0fc13bec9350bfa194d62af3831d6a5b5751015e9e77d75cafc19b66821225d8 size: 948 The push refers to a repository [localhost:32801/kubevirt/winrmcli] 3cd438b33e81: Preparing 8519683f2557: Preparing a29ba32ac0a1: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/subresource-access-test 3cd438b33e81: Pushed a29ba32ac0a1: Pushed 8519683f2557: Pushed devel: digest: sha256:7b14a2ed86abee60b66bd04406c1804d1bc6db9ffdd11b7e9a37c87bdfd4c8c8 size: 1165 make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' Done ./cluster/clean.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release0 ++ 
job_prefix=kubevirt-functional-tests-openshift-3.10-release0 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-alpha.4-6-gb1d7e58 ++ KUBEVIRT_VERSION=v0.7.0-alpha.4-6-gb1d7e58 + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:0977f1c716862710c8324798b95802e11a149f4532e33be20dd70877fe8f5332 ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ kubeconfig=cluster/vagrant/.kubeconfig +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32801/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Cleaning up ...' Cleaning up ... 
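The trace that follows is the cleanup phase: cluster/clean.sh walks every KubeVirt-labelled resource kind in the default and kube-system namespaces and deletes whatever it finds, so "No resources found" on each step is the expected output on a cluster with nothing deployed. A hedged bash sketch of that loop, reconstructed from the trace rather than copied from the script (the kind list and namespace order are the ones visible in the log; the initial forced pod deletions and CRD checks are omitted):
# Sketch only: delete all kubevirt.io-labelled objects, namespace by namespace.
namespaces=(default kube-system)
kinds=(apiservices deployment rs services validatingwebhookconfiguration secrets pv pvc ds customresourcedefinitions pods clusterrolebinding rolebinding roles clusterroles serviceaccounts)
for ns in "${namespaces[@]}"; do
  for kind in "${kinds[@]}"; do
    # the trace's _kubectl helper wraps cluster/os-3.10.0/.kubectl with KUBECONFIG exported
    cluster/kubectl.sh -n "$ns" delete "$kind" -l kubevirt.io
  done
done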
+ cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers + grep foregroundDeleteVirtualMachine + read p error: the server doesn't have a resource type "vmis" + _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0 No resources found + namespaces=(default ${namespace}) + for i in '${namespaces[@]}' + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete deployment -l kubevirt.io No resources found + _kubectl -n default delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rs -l kubevirt.io No resources found + _kubectl -n default delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete services -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n default delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete secrets -l kubevirt.io No resources found + _kubectl -n default delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pv -l kubevirt.io No resources found + _kubectl -n default delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pvc -l kubevirt.io No resources found + _kubectl -n default delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete ds -l kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n default delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pods -l kubevirt.io No resources found + _kubectl -n default delete 
clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n default delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rolebinding -l kubevirt.io No resources found + _kubectl -n default delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete roles -l kubevirt.io No resources found + _kubectl -n default delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterroles -l kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l kubevirt.io No resources found ++ wc -l ++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + for i in '${namespaces[@]}' + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete services -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n kube-system delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete secrets -l kubevirt.io No resources found + _kubectl -n kube-system delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n 
kube-system delete pv -l kubevirt.io No resources found + _kubectl -n kube-system delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pvc -l kubevirt.io No resources found + _kubectl -n kube-system delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete ds -l kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n kube-system delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pods -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete roles -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterroles -l kubevirt.io No resources found + _kubectl -n kube-system delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ wc -l ++ cluster/os-3.10.0/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + sleep 2 + echo Done Done ./cluster/deploy.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ 
APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release0 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release0 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-alpha.4-6-gb1d7e58 ++ KUBEVIRT_VERSION=v0.7.0-alpha.4-6-gb1d7e58 + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:0977f1c716862710c8324798b95802e11a149f4532e33be20dd70877fe8f5332 ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ kubeconfig=cluster/vagrant/.kubeconfig +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32801/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Deploying ...' Deploying ... 
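The deploy trace below reduces to a small amount of control flow: apply every release manifest except the demo content, apply the testing manifests recursively, and, because the provider matches os-*, grant the privileged SCC to the KubeVirt service accounts. A hedged bash sketch of that logic as read off the trace (not the verbatim cluster/deploy.sh):
# Sketch only: _kubectl stands for cluster/os-3.10.0/.kubectl with KUBECONFIG
# exported, exactly as expanded in the trace below.
for manifest in "${MANIFESTS_OUT_DIR}"/release/*; do
  [[ $manifest =~ .*demo.* ]] && continue      # demo-content.yaml is skipped
  _kubectl create -f "$manifest"
done
_kubectl create -f "${MANIFESTS_OUT_DIR}/testing" -R
if [[ $KUBEVIRT_PROVIDER =~ os-.* ]]; then     # OpenShift needs explicit SCC grants
  for sa in kubevirt-controller kubevirt-testing kubevirt-privileged kubevirt-apiserver; do
    _kubectl adm policy add-scc-to-user privileged -z "$sa" -n kube-system
  done
  _kubectl adm policy add-scc-to-user privileged admin
fi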
+ [[ -z openshift-3.10-release ]]
+ [[ openshift-3.10-release =~ .*-dev ]]
+ [[ openshift-3.10-release =~ .*-release ]]
+ for manifest in '${MANIFESTS_OUT_DIR}/release/*'
+ [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]]
+ continue
+ for manifest in '${MANIFESTS_OUT_DIR}/release/*'
+ [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]]
+ _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml
clusterrole.rbac.authorization.k8s.io "kubevirt.io:admin" created
clusterrole.rbac.authorization.k8s.io "kubevirt.io:edit" created
clusterrole.rbac.authorization.k8s.io "kubevirt.io:view" created
serviceaccount "kubevirt-apiserver" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver-auth-delegator" created
rolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created
role.rbac.authorization.k8s.io "kubevirt-apiserver" created
clusterrole.rbac.authorization.k8s.io "kubevirt-apiserver" created
clusterrole.rbac.authorization.k8s.io "kubevirt-controller" created
serviceaccount "kubevirt-controller" created
serviceaccount "kubevirt-privileged" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller-cluster-admin" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-privileged-cluster-admin" created
clusterrole.rbac.authorization.k8s.io "kubevirt.io:default" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt.io:default" created
service "virt-api" created
deployment.extensions "virt-api" created
deployment.extensions "virt-controller" created
daemonset.extensions "virt-handler" created
customresourcedefinition.apiextensions.k8s.io "virtualmachineinstances.kubevirt.io" created
customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancereplicasets.kubevirt.io" created
customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancepresets.kubevirt.io" created
customresourcedefinition.apiextensions.k8s.io "virtualmachines.kubevirt.io" created
+ _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R
persistentvolumeclaim "disk-alpine" created
persistentvolume "host-path-disk-alpine" created
persistentvolumeclaim "disk-custom" created
persistentvolume "host-path-disk-custom" created
daemonset.extensions "disks-images-provider" created
serviceaccount "kubevirt-testing" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-testing-cluster-admin" created
+ [[ os-3.10.0 =~ os-* ]]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"]
+ _kubectl adm policy add-scc-to-user privileged admin
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged admin
scc "privileged" added to: ["admin"]
+ echo Done
Done
+ namespaces=(kube-system default)
+ [[ kube-system != \k\u\b\e\-\s\y\s\t\e\m ]]
+ timeout=300
+ sample=30
+ for i in '${namespaces[@]}'
+ current_time=0
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
++ grep -v Running
+ '[' -n 'disks-images-provider-nfn9l 0/1 ContainerCreating 0 2s
disks-images-provider-qxddj 0/1 ContainerCreating 0 2s
virt-api-7d79764579-5bx29 0/1 ContainerCreating 0 3s
virt-api-7d79764579-l6c7w 0/1 ContainerCreating 0 3s
virt-controller-7d57d96b65-gw7zd 0/1 ContainerCreating 0 3s
virt-controller-7d57d96b65-t5pcs 0/1 ContainerCreating 0 3s
virt-handler-x7w96 0/1 ContainerCreating 0 3s
virt-handler-zmwp5 0/1 ContainerCreating 0 3s' ']'
+ echo 'Waiting for kubevirt pods to enter the Running state ...'
Waiting for kubevirt pods to enter the Running state ...
+ kubectl get pods -n kube-system --no-headers
+ grep -v Running
+ cluster/kubectl.sh get pods -n kube-system --no-headers
disks-images-provider-nfn9l 0/1 ContainerCreating 0 3s
disks-images-provider-qxddj 0/1 ContainerCreating 0 3s
virt-api-7d79764579-5bx29 0/1 ContainerCreating 0 4s
virt-api-7d79764579-l6c7w 0/1 ContainerCreating 0 4s
virt-controller-7d57d96b65-gw7zd 0/1 ContainerCreating 0 4s
virt-controller-7d57d96b65-t5pcs 0/1 ContainerCreating 0 4s
virt-handler-x7w96 0/1 ContainerCreating 0 4s
virt-handler-zmwp5 0/1 ContainerCreating 0 4s
+ sleep 30
+ current_time=30
+ '[' 30 -gt 300 ']'
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
++ grep -v Running
+ '[' -n 'disks-images-provider-nfn9l 0/1 ContainerCreating 0 36s
disks-images-provider-qxddj 0/1 ContainerCreating 0 36s
virt-api-7d79764579-l6c7w 0/1 ContainerCreating 0 37s' ']'
+ echo 'Waiting for kubevirt pods to enter the Running state ...'
Waiting for kubevirt pods to enter the Running state ...
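The "Waiting for kubevirt pods ..." cycle here is the deploy script's readiness gate: every 30 seconds it lists the pods in the namespace, filters out the ones already Running, and keeps waiting while that filtered list is non-empty, failing once 300 seconds elapse. A second pass (visible below) applies the same idea to per-container readiness via -ocustom-columns ... | grep false. Condensed to its skeleton (the timeout, sample, and kubectl pipeline come from the trace; the while form is ours):

# Sketch of the polling loop running in the trace (timeout=300, sample=30).
timeout=300
sample=30
current_time=0
while [ -n "$(cluster/kubectl.sh get pods -n kube-system --no-headers | grep -v Running)" ]; do
  echo 'Waiting for kubevirt pods to enter the Running state ...'
  sleep "$sample"
  current_time=$((current_time + sample))
  # Fail the job instead of spinning forever if pods never settle.
  [ "$current_time" -gt "$timeout" ] && exit 1
done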
+ kubectl get pods -n kube-system --no-headers
+ grep -v Running
+ cluster/kubectl.sh get pods -n kube-system --no-headers
disks-images-provider-nfn9l 0/1 ContainerCreating 0 39s
disks-images-provider-qxddj 0/1 ContainerCreating 0 39s
virt-api-7d79764579-l6c7w 0/1 ContainerCreating 0 40s
+ sleep 30
+ current_time=60
+ '[' 60 -gt 300 ']'
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
++ grep -v Running
+ '[' -n 'virt-api-7d79764579-l6c7w 0/1 ContainerCreating 0 1m' ']'
+ echo 'Waiting for kubevirt pods to enter the Running state ...'
Waiting for kubevirt pods to enter the Running state ...
+ grep -v Running
+ kubectl get pods -n kube-system --no-headers
+ cluster/kubectl.sh get pods -n kube-system --no-headers
virt-api-7d79764579-l6c7w 0/1 ContainerCreating 0 1m
+ sleep 30
+ current_time=90
+ '[' 90 -gt 300 ']'
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
++ grep -v Running
+ '[' -n '' ']'
+ current_time=0
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ '[' -n '' ']'
+ kubectl get pods -n kube-system
+ cluster/kubectl.sh get pods -n kube-system
NAME                               READY     STATUS    RESTARTS   AGE
disks-images-provider-nfn9l        1/1       Running   0          1m
disks-images-provider-qxddj        1/1       Running   0          1m
master-api-node01                  1/1       Running   1          4d
master-controllers-node01          1/1       Running   1          4d
master-etcd-node01                 1/1       Running   1          4d
virt-api-7d79764579-5bx29          1/1       Running   0          1m
virt-api-7d79764579-l6c7w          1/1       Running   0          1m
virt-controller-7d57d96b65-gw7zd   1/1       Running   0          1m
virt-controller-7d57d96b65-t5pcs   1/1       Running   0          1m
virt-handler-x7w96                 1/1       Running   0          1m
virt-handler-zmwp5                 1/1       Running   0          1m
+ for i in '${namespaces[@]}'
+ current_time=0
++ kubectl get pods -n default --no-headers
++ cluster/kubectl.sh get pods -n default --no-headers
++ grep -v Running
+ '[' -n '' ']'
+ current_time=0
++ kubectl get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
++ cluster/kubectl.sh get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ '[' -n '' ']'
+ kubectl get pods -n default
+ cluster/kubectl.sh get pods -n default
NAME                       READY     STATUS    RESTARTS   AGE
docker-registry-1-tqlsm    1/1       Running   2          4d
registry-console-1-bhtqz   1/1       Running   1          4d
router-1-r2xxq             1/1       Running   1          4d
+ kubectl version
+ cluster/kubectl.sh version
oc v3.10.0-rc.0+c20e215
kubernetes v1.10.0+b81c8f8
features: Basic-Auth GSSAPI Kerberos SPNEGO

Server https://127.0.0.1:32798
openshift v3.10.0-rc.0+c20e215
kubernetes v1.10.0+b81c8f8
+ ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml'
+ [[ -d /home/nfs/images/windows2016 ]]
+ FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml'
+ make functest
hack/dockerized "hack/build-func-tests.sh"
sha256:2df8b30e8f619e28e75e00ea9fa42c63f4f14b1c34fbb1223214102337507863
go version go1.10 linux/amd64
go version go1.10 linux/amd64
Compiling tests...
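At this point the cluster side is healthy, and the job hands off to the functional test suite. The ginkgo arguments are collected in FUNC_TEST_ARGS, so re-running the same suite by hand would presumably amount to the following (paths taken from the trace; the exact Makefile plumbing is not shown in this log, so treat the forwarding as an assumption):

# Assumption: make functest forwards FUNC_TEST_ARGS to hack/functests.sh.
FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' \
  make functest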
compiled tests.test
hack/functests.sh
Running Suite: Tests Suite
==========================
Random Seed: 1530015889
Will run 137 of 137 specs

•••••••••••
------------------------------
• [SLOW TEST:79.455 seconds]
Health Monitoring
/root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:37
  A VirtualMachineInstance with a watchdog device
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:56
    should be shut down when the watchdog expires
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:57
------------------------------
•
------------------------------
• [SLOW TEST:19.278 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    should start it
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
------------------------------
• [SLOW TEST:16.203 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    should attach virt-launcher to it
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:76
------------------------------
••••
------------------------------
• [SLOW TEST:53.469 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    with boot order
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:164
      should be able to boot from selected disk
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        Alpine as first boot
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:26.525 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    with boot order
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:164
      should be able to boot from selected disk
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        Cirros as first boot
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:16.035 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    with user-data
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:195
      without k8s secret
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:196
        should retry starting the VirtualMachineInstance
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:197
------------------------------
• [SLOW TEST:16.496 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    with user-data
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:195
      without k8s secret
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:196
        should log warning and proceed once the secret is there
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:227
------------------------------
• [SLOW TEST:37.210 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    when virt-launcher crashes
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:275
      should be stopped and have Failed phase
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:276
------------------------------
• [SLOW TEST:23.658 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    when virt-handler crashes
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:298
      should recover and continue management
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:299
------------------------------
• [SLOW TEST:32.458 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    when virt-handler is responsive
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:328
      should indicate that a node is ready for vmis
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:329
------------------------------
• [SLOW TEST:97.484 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    when virt-handler is not responsive
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:359
      the node controller should react
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:397
------------------------------
S [SKIPPING] [0.067 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    with non default namespace
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:450
      should log libvirt start and stop lifecycle events of the domain
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        kubevirt-test-default [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

        Skip log query tests for JENKINS ci test environment
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:455
------------------------------
S [SKIPPING] [0.047 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    with non default namespace
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:450
      should log libvirt start and stop lifecycle events of the domain
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        kubevirt-test-alternative [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

        Skip log query tests for JENKINS ci test environment
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:455
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.045 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:511
      should enable emulation in virt-launcher [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:523

      Software emulation is not enabled on this cluster
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:519
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.037 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:511
      should be reflected in domain XML [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:560

      Software emulation is not enabled on this cluster
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:519
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.058 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:64
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:511
      should request a TUN device but not KVM [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:604

      Software emulation is not enabled on this cluster
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:519
------------------------------
••••
------------------------------
• Failure [180.067 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Delete a VirtualMachineInstance's Pod
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:756
    should result in the VirtualMachineInstance moving to a finalized state [It]
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:757

    Timed out after 90.000s.
    Expected
        : false
    to equal
        : true

    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
STEP: Creating the VirtualMachineInstance
level=info timestamp=2018-06-26T12:32:07.058733Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmiqf44x-w7mj9"
• Failure [180.227 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Delete a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:788
    with an active pod.
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:789
      should result in pod being terminated [It]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:790

      Timed out after 90.000s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
STEP: Creating the VirtualMachineInstance
level=info timestamp=2018-06-26T12:35:07.288640Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmi9ctb9-lqnp6"
• Failure [180.245 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Delete a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:788
    with grace period greater than 0
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:813
      should run graceful shutdown [It]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:814

      Timed out after 90.000s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
STEP: Setting a VirtualMachineInstance termination grace period to 5
STEP: Creating the VirtualMachineInstance
level=info timestamp=2018-06-26T12:38:07.562878Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmi6gxhn-r7dxw"
• Failure [180.231 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Killed VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:865
    should be in Failed phase [It]
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:866

    Timed out after 90.000s.
    Expected
        : false
    to equal
        : true

    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
STEP: Starting a VirtualMachineInstance
level=info timestamp=2018-06-26T12:41:07.783719Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmi9qjjr-mh9t7"
• Failure [180.426 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Killed VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:865
    should be left alone by virt-handler [It]
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:893

    Timed out after 90.191s.
    Expected
        : false
    to equal
        : true

    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
STEP: Starting a VirtualMachineInstance
level=info timestamp=2018-06-26T12:44:08.080528Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmihdhjx-jb2gp"
• Failure [180.228 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
      with a cirros image
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
        should return that we are running cirros [It]
        /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:67

        Timed out after 90.000s.
        Expected
            : false
        to equal
            : true

        /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
STEP: Creating a new VirtualMachineInstance
level=info timestamp=2018-06-26T12:47:08.473798Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmirvn6f-n5s4c"
• Failure [180.226 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
      with a fedora image
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:76
        should return that we are running fedora [It]
        /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:77

        Timed out after 90.000s.
        Expected
            : false
        to equal
            : true

        /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
STEP: Creating a new VirtualMachineInstance
level=info timestamp=2018-06-26T12:50:08.680718Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmijnczq-m6x9j"
• Failure [180.421 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
      should be able to reconnect to console multiple times [It]
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:86

      Timed out after 90.187s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
STEP: Creating a new VirtualMachineInstance
level=info timestamp=2018-06-26T12:53:08.895503Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmipxzk6-nfwgw"
• Failure [200.396 seconds]
LeaderElection
/root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:43
  Start a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:53
    when the controller pod is not running
    /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:54
      should success [It]
      /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:55

      Timed out after 90.010s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
STEP: Destroying the leading controller pod
STEP: Starting a new VirtualMachineInstance
level=info timestamp=2018-06-26T12:56:29.550388Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmix2qf4-srpct"
• [SLOW TEST:6.322 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should scale
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    to three, to two and then to zero replicas
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:17.240 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should scale
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    to five, to six and then to zero replicas
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
••
------------------------------
• Failure [121.100 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should update readyReplicas once VMIs are up [It]
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:157

  Timed out after 120.000s.
  Expected
      : 0
  to equal
      : 2

  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:166
------------------------------
STEP: Create a new VirtualMachineInstance replica set
STEP: Scaling to 2
STEP: Checking the number of replicas
STEP: Checking the number of ready replicas
••
------------------------------
• [SLOW TEST:5.466 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should not scale when paused and scale when resume
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:223
------------------------------
• Failure in Spec Setup (BeforeEach) [60.217 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:38
  VirtualMachineInstance with slirp interface
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:47
    should start the virtial machine with slirp interface [BeforeEach]
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:57

    Timed out after 30.189s.
    Expected
        : false
    to equal
        : true

    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:02:05.509292Z pos=utils.go:251 component=tests msg="Created virtual machine pod virt-launcher-testvmindmwr-s4rbs"
• Failure in Spec Setup (BeforeEach) [60.028 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:38
  VirtualMachineInstance with slirp interface
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:47
    should return "Hello World" when connecting to localhost on port 80 [BeforeEach]
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:69

    Timed out after 30.000s.
    Expected
        : false
    to equal
        : true

    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:03:05.712105Z pos=utils.go:251 component=tests msg="Created virtual machine pod virt-launcher-testvmic6wzx-mkvf9"
• Failure in Spec Setup (BeforeEach) [60.221 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:38
  VirtualMachineInstance with slirp interface
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:47
    should reject the connecting to localhost and port different than 80 [BeforeEach]
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:82

    Timed out after 30.189s.
    Expected
        : false
    to equal
        : true

    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:04:05.715130Z pos=utils.go:251 component=tests msg="Created virtual machine pod virt-launcher-testvmidxq9v-z8nmb"
• Failure in Spec Setup (BeforeEach) [60.033 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:38
  VirtualMachineInstance with slirp interface
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:47
    should be able to communicate with the outside world [BeforeEach]
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:93

    Timed out after 30.000s.
    Expected
        : false
    to equal
        : true

    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:05:05.899954Z pos=utils.go:251 component=tests msg="Created virtual machine pod virt-launcher-testvmicvhrr-8r666"
• Failure in Spec Setup (BeforeEach) [181.441 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to implicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:364
    should be able to reach
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      the Inbound VirtualMachineInstance
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.187s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:06:07.220297Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmif7jth-fs2t9"
• Failure in Spec Setup (BeforeEach) [180.638 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to implicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:364
    should be able to reach
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      the internet
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.189s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:09:07.771302Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmif98lb-tq7tr"
• Failure in Spec Setup (BeforeEach) [180.635 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to implicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:364
    should be reachable via the propagated IP from a Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      on the same node from Pod
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.187s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:12:08.441836Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmikrw7s-lw67x"
• Failure in Spec Setup (BeforeEach) [180.445 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to implicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:364
    should be reachable via the propagated IP from a Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      on a different node from Pod
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.000s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:15:09.043841Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvminmxbb-kbr7l"
• Failure in Spec Setup (BeforeEach) [180.635 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to implicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:364
    should be reachable via the propagated IP from a Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      on the same node from Node
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.169s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:18:09.570769Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmiprt6p-wr28g"
• Failure in Spec Setup (BeforeEach) [180.447 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to implicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:364
    should be reachable via the propagated IP from a Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      on a different node from Node
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.000s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:21:10.197548Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmim8gdd-l69q5"
• Failure in Spec Setup (BeforeEach) [180.636 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to implicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:364
    with a service matching the vmi exposed
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:277
      should be able to reach the vmi based on labels specified on the vmi
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:297

      Timed out after 90.176s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:24:10.620347Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmim7m97-sjb49"
• Failure in Spec Setup (BeforeEach) [180.634 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to implicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:364
    with a service matching the vmi exposed
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:277
      should fail to reach the vmi if an invalid servicename is used
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:308

      Timed out after 90.188s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:27:11.259531Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmidgsml-4lmg5"
• Failure in Spec Setup (BeforeEach) [180.635 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to implicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:364
    with a subdomain and a headless service given
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:324
      should be able to reach the vmi via its unique fully qualified domain name
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:347

      Timed out after 90.187s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:30:11.947214Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmihrqd2-smbg7"
• Failure in Spec Setup (BeforeEach) [180.437 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to explicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:367
    should be able to reach
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      the Inbound VirtualMachineInstance
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.000s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:33:13.051962Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvminvks8-w9xdc"
• Failure in Spec Setup (BeforeEach) [180.631 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to explicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:367
    should be able to reach
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      the internet
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.187s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:36:12.945193Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmi96n6h-msxn7"
• Failure in Spec Setup (BeforeEach) [180.635 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to explicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:367
    should be reachable via the propagated IP from a Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      on the same node from Pod
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.185s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:39:13.614495Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmiksst8-2fnqd"
• Failure in Spec Setup (BeforeEach) [181.821 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to explicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:367
    should be reachable via the propagated IP from a Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      on a different node from Pod
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.188s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:42:15.401471Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmi4ww2z-j7h9z"
• Failure in Spec Setup (BeforeEach) [180.444 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to explicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:367
    should be reachable via the propagated IP from a Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      on the same node from Node
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.000s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:45:16.048013Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmiffqfd-dp6dw"
• Failure in Spec Setup (BeforeEach) [180.441 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to explicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:367
    should be reachable via the propagated IP from a Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      on a different node from Node
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

      Timed out after 90.000s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:48:16.590571Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmi25m25-z6sxs"
• Failure in Spec Setup (BeforeEach) [180.658 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance attached to explicit pod network [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:367
    with a service matching the vmi exposed
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:277
      should be able to reach the vmi based on labels specified on the vmi
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:297

      Timed out after 90.190s.
      Expected
          : false
      to equal
          : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1026
------------------------------
level=info timestamp=2018-06-26T13:51:17.055658Z pos=utils.go:240 component=tests msg="Created virtual machine pod virt-launcher-testvmiff2jr-l47xh"
panic: test timed out after 1h30m0s

goroutine 14436 [running]:
testing.(*M).startAlarm.func1()
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1240 +0xfc
created by time.goFunc
    /gimme/.gimme/versions/go1.10.linux.amd64/src/time/sleep.go:172 +0x44

goroutine 1 [chan receive, 90 minutes]:
testing.(*T).Run(0xc4203470e0, 0x120ba4c, 0x9, 0x1293290, 0x47fc86)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:825 +0x301
testing.runTests.func1(0xc420346000)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1063 +0x64
testing.tRunner(0xc420346000, 0xc4208c3df8)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0
testing.runTests(0xc4202b8840, 0x1ab0640, 0x1, 0x1, 0x412009)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1061 +0x2c4
testing.(*M).Run(0xc4206d8000, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:978 +0x171
main.main()
    _testmain.go:44 +0x151

goroutine 4 [chan receive]:
kubevirt.io/kubevirt/vendor/github.com/golang/glog.(*loggingT).flushDaemon(0x1ad7160)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:879 +0x8b
created by kubevirt.io/kubevirt/vendor/github.com/golang/glog.init.0
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:410 +0x203

goroutine 5 [syscall, 90 minutes]:
os/signal.signal_recv(0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/sigqueue.go:139 +0xa6
os/signal.loop()
    /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:22 +0x22
created by os/signal.init.0
    /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:28 +0x41

goroutine 39 [select]:
kubevirt.io/kubevirt/tests.(*ObjectEventWatcher).Watch(0xc4206b5040, 0xc4208d4a20)
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:285 +0x57e
kubevirt.io/kubevirt/tests.(*ObjectEventWatcher).WaitFor(0xc4206b5040, 0x1207abb, 0x6, 0x108ab80, 0x1306f80, 0x0)
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:295 +0xba
kubevirt.io/kubevirt/tests.waitForVMIStart(0x130fa60, 0xc4202a4b40, 0x5a, 0x0, 0x0, 0x0)
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1012 +0x4d2
kubevirt.io/kubevirt/tests.WaitForSuccessfulVMIStart(0x130fa60, 0xc4202a4b40, 0xc42037e000, 0xc4206b5138)
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 +0x43
kubevirt.io/kubevirt/tests_test.glob..func17.3(0xc4202a4b40, 0x1293268)
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:86 +0x4d
kubevirt.io/kubevirt/tests_test.glob..func17.4.1(0x1136400, 0x0, 0x0)
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:132 +0x6ac
kubevirt.io/kubevirt/tests_test.glob..func17.4()
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:159 +0xd9
kubevirt.io/kubevirt/tests.BeforeAll.func1()
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1321 +0x3f
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).runSync(0xc420298120, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:109 +0x9c
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).run(0xc420298120, 0xc420313d10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:63 +0x13e
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*SetupNode).Run(0xc42000e150, 0x130c380, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go:14 +0x7f
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).runSample(0xc42039bee0, 0x0, 0x130c380, 0xc420022270)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:158 +0x1e0
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).Run(0xc42039bee0, 0x130c380, 0xc420022270)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:127 +0xe3
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpec(0xc4203cc3c0, 0xc42039bee0, 0x0)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:198 +0x10d
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpecs(0xc4203cc3c0, 0x1293f01)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:168 +0x32c
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run(0xc4203cc3c0, 0xb)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:64 +0xdc
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite.(*Suite).Run(0xc4200d8c30, 0x7ff4f7117c90, 0xc4203470e0, 0x120de21, 0xb, 0xc4202b88a0, 0x2, 0x2, 0x1326180, 0xc420022270, ...)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite/suite.go:62 +0x27c
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithCustomReporters(0x130d100, 0xc4203470e0, 0x120de21, 0xb, 0xc4202b8860, 0x2, 0x2, 0x2)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:218 +0x258
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithDefaultAndCustomReporters(0x130d100, 0xc4203470e0, 0x120de21, 0xb, 0xc4201fc810, 0x1, 0x1, 0x1)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:206 +0xab
kubevirt.io/kubevirt/tests_test.TestTests(0xc4203470e0)
    /root/go/src/kubevirt.io/kubevirt/tests/tests_suite_test.go:42 +0xaa
testing.tRunner(0xc4203470e0, 0x1293290)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0
created by testing.(*T).Run
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:824 +0x2e0

goroutine 40 [chan receive, 90 minutes]:
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).registerForInterrupts(0xc4203cc3c0)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:220 +0xc0
created by kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:59 +0x60

goroutine 24 [select, 90 minutes, locked to thread]:
runtime.gopark(0x1294ff8, 0x0, 0x12087db, 0x6, 0x18, 0x1)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/proc.go:291 +0x11a
runtime.selectgo(0xc420463750, 0xc420048120)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/select.go:392 +0xe50
runtime.ensureSigM.func1()
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/signal_unix.go:549 +0x1f4
runtime.goexit()
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/asm_amd64.s:2361 +0x1

goroutine 45 [IO wait]:
internal/poll.runtime_pollWait(0x7ff4f71aaf00, 0x72, 0xc4208ef850)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/netpoll.go:173 +0x57
internal/poll.(*pollDesc).wait(0xc4207b0898, 0x72, 0xffffffffffffff00, 0x130e0a0, 0x19c8638)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:85 +0x9b
internal/poll.(*pollDesc).waitRead(0xc4207b0898, 0xc420598000, 0x8000, 0x8000)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:90 +0x3d
internal/poll.(*FD).Read(0xc4207b0880, 0xc420598000, 0x8000, 0x8000, 0x0, 0x0, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_unix.go:157 +0x17d
net.(*netFD).Read(0xc4207b0880, 0xc420598000, 0x8000, 0x8000, 0x0, 0x8, 0x7ffb)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/net/fd_unix.go:202 +0x4f
net.(*conn).Read(0xc42000e360, 0xc420598000, 0x8000, 0x8000, 0x0, 0x0, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/net/net.go:176 +0x6a
crypto/tls.(*block).readFromUntil(0xc4203dafc0, 0x7ff4f7117d40, 0xc42000e360, 0x5, 0xc42000e360, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:493 +0x96
crypto/tls.(*Conn).readRecord(0xc4200eca80, 0x1295117, 0xc4200ecba0, 0x20)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:595 +0xe0
crypto/tls.(*Conn).Read(0xc4200eca80, 0xc420746000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:1156 +0x100
bufio.(*Reader).Read(0xc4200bf8c0, 0xc4206fae38, 0x9, 0x9, 0xc4204ad8a8, 0x1, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/bufio/bufio.go:216 +0x238
io.ReadAtLeast(0x130afe0, 0xc4200bf8c0, 0xc4206fae38, 0x9, 0x9, 0x9, 0xc42075d2a0, 0x43f2c1, 0xc420ac3680)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:309 +0x86
io.ReadFull(0x130afe0, 0xc4200bf8c0, 0xc4206fae38, 0x9, 0x9, 0x1295140, 0xc4208efd10, 0x462d33)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:327 +0x58
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.readFrameHeader(0xc4206fae38, 0x9, 0x9, 0x130afe0, 0xc4200bf8c0, 0x0, 0xc400000000, 0x8488d0, 0xc42065dce8)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:237 +0x7b
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Framer).ReadFrame(0xc4206fae00, 0xc4203f0720, 0x0, 0x0, 0x0)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:492 +0xa4
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*clientConnReadLoop).run(0xc4208effb0, 0x12941a8, 0xc42007afb0)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1428 +0x8e
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*ClientConn).readLoop(0xc4206e44e0)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1354 +0x76
created by kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Transport).newClientConn
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:579 +0x651

goroutine 14435 [chan receive]:
kubevirt.io/kubevirt/tests.(*ObjectEventWatcher).Watch.func3(0x1313d60, 0xc4208320c0, 0x0, 0xc420a2c7d0, 0xc420b97860)
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:272 +0x93
created by kubevirt.io/kubevirt/tests.(*ObjectEventWatcher).Watch
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:270 +0x4b3

goroutine 14434 [semacquire]:
sync.runtime_notifyListWait(0xc42065dd00, 0xc400000001)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/sema.go:510 +0x10b
sync.(*Cond).Wait(0xc42065dcf0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/sync/cond.go:56 +0x80
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*pipe).Read(0xc42065dce8, 0xc4209b3801, 0x5ff, 0x5ff, 0x0, 0x0, 0x0)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/pipe.go:64 +0x8f
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.transportResponseBody.Read(0xc42065dcc0, 0xc4209b3801, 0x5ff, 0x5ff, 0x0, 0x0, 0x0)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1674 +0xa1
encoding/json.(*Decoder).refill(0xc420a43e00, 0x88950a, 0x9)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/encoding/json/stream.go:159 +0x132
encoding/json.(*Decoder).readValue(0xc420a43e00, 0x0, 0x0, 0x109ae40)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/encoding/json/stream.go:134 +0x23d
encoding/json.(*Decoder).Decode(0xc420a43e00, 0x10b3ec0, 0xc4208cb740, 0x1311c20, 0xc4206cca00)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/encoding/json/stream.go:63 +0x78
kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/util/framer.(*jsonFrameReader).Read(0xc420832090, 0xc4201c1800, 0x400, 0x400, 0xc4209be8c0, 0x40, 0x38)
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go:150 +0x295
kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming.(*decoder).Decode(0xc4203994a0, 0x0, 0x13137e0, 0xc4209be8c0, 0x380, 0x1311c20, 0xc420b97858, 0x456ae0, 0xc420b97800)
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go:77 +0x95
kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest/watch.(*Decoder).Decode(0xc420ab2fa0, 0xc420a6ffa8, 0x5, 0x1311c20, 0xc4206cca00, 0x0, 0x0)
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest/watch/decoder.go:49 +0x7c
kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/watch.(*StreamWatcher).receive(0xc4208320c0)
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go:93 +0x12e
created by kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/watch.NewStreamWatcher
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go:60 +0xa8

make: *** [functest] Error 2
+ make cluster-down
./cluster/down.sh
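One closing observation on the failure mode: every failed spec above trips the same assertion at tests/utils.go:1026, i.e. waitForVMIStart never saw the VirtualMachineInstance become ready, and the final "panic: test timed out after 1h30m0s" is not a separate bug but Go's testing package enforcing the test binary's -timeout: when the deadline fires it panics and dumps every goroutine, which is the stack listing above (goroutine 39 is still parked inside waitForVMIStart when the alarm goes off). The same guard exists on any Go test binary; a 90-minute cap like the one that fired here would presumably be set with something equivalent to:

# Go's testing package kills the binary and dumps all goroutines when
# this deadline expires, producing exactly the panic seen above.
go test -timeout 90m ./tests/...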