+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ [[ openshift-3.10-release =~ openshift-.* ]]
+ [[ openshift-3.10-release =~ .*-crio-.* ]]
+ export KUBEVIRT_PROVIDER=os-3.10.0
+ KUBEVIRT_PROVIDER=os-3.10.0
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT SIGINT SIGTERM SIGSTOP
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading .......
Downloading .......
2018/07/27 06:17:52 Waiting for host: 192.168.66.102:22
2018/07/27 06:17:55 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/27 06:18:03 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/27 06:18:11 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/27 06:18:19 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/27 06:18:24 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: connection refused. Sleeping 5s
2018/07/27 06:18:29 Connected to tcp://192.168.66.102:22
+ systemctl stop origin-node.service
+ rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/
++ docker ps -q
+ containers=
+ '[' -n '' ']'
++ docker ps -q -a
+ containers='2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3'
+ '[' -n '2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3' ']'
+ docker rm -f 2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3
2cfbef31c987
e183c40c07dc
861f604efed4
12902ad26342
028539b1f68b
bd6f07c1906c
d1f95a33a226
c43f96b6da26
e007e5cfd226
b42e2bceca6e
00531aec6f9a
e4ad39ba6cef
504c3df6bbf4
eb1ec0b445ce
b8955b91e8e5
f739ed8f3e59
07668d85ab3a
a6045d125d7b
2ce17110e009
b45f64ab28ef
3a15945be9e1
2a0af99ae1d1
0ece927846d7
0202d5f5dfae
8ce743769d8f
2efb36567bd8
96b65c0493c5
e9ce89fa30e3
2018/07/27 06:18:32 Waiting for host: 192.168.66.101:22
2018/07/27 06:18:35 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/27 06:18:43 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/27 06:18:51 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/27 06:18:59 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/27 06:19:05 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: connection refused. Sleeping 5s
2018/07/27 06:19:10 Connected to tcp://192.168.66.101:22
+ inventory_file=/root/inventory
+ openshift_ansible=/root/openshift-ansible
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=0.763 ms
--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.763/0.763/0.763/0.000 ms
Found node02. Adding it to the inventory.
+ '[' 0 -ne 0 ']'
+ nodes_found=true
+ set -e
+ echo '192.168.66.102 node02'
+ echo 'Found node02. Adding it to the inventory.'
+ echo 'node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102'
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable
--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms
+ '[' 1 -ne 0 ']'
+ break
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml

PLAY [Populate config host groups] *********************************************

TASK [Load group name mapping variables] ***************************************
ok: [localhost]

TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]

TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]

TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]

TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]

TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]

TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]

TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]

TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)

TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]

TASK [Evaluate oo_new_etcd_to_config] ******************************************

TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]

TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_nodes_to_config]
********************************************* ok: [localhost] => (item=node02) TASK [Evaluate oo_nodes_to_bootstrap] ****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) PLAY [Ensure there are new_nodes] ********************************************** TASK [fail] ******************************************************************** skipping: [localhost] TASK [fail] ******************************************************************** skipping: [localhost] PLAY [Initialization Checkpoint Start] ***************************************** TASK [Set install initialization 'In Progress'] ******************************** ok: [node01] PLAY [Populate config host groups] ********************************************* TASK [Load group name mapping variables] *************************************** ok: [localhost] TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] ********* skipping: [localhost] TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_lb_hosts required] *********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts required] ********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts is single host] **************************** skipping: [localhost] TASK [Evaluate groups - g_glusterfs_hosts required] **************************** skipping: [localhost] TASK [Evaluate oo_all_hosts] *************************************************** ok: [localhost] => (item=node01) ok: [localhost] => (item=node02) TASK [Evaluate oo_masters] ***************************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_master] ************************************************ ok: [localhost] TASK [Evaluate oo_new_etcd_to_config] ****************************************** TASK [Evaluate oo_masters_to_config] ******************************************* ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_to_config] ********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_etcd] ************************************************** ok: [localhost] TASK [Evaluate oo_etcd_hosts_to_upgrade] *************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_hosts_to_backup] **************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_nodes_to_config] ********************************************* ok: [localhost] => (item=node02) TASK [Evaluate oo_nodes_to_bootstrap] ****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** 
ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) [WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config PLAY [Ensure that all non-node hosts are accessible] *************************** TASK [Gathering Facts] ********************************************************* ok: [node01] PLAY [Initialize basic host facts] ********************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02 TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] *** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : debug] ************************************ skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_stats] ******************************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] *** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02 included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02 TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Standardize on latest variable names] ***** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Normalize openshift_release] ************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02 TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* skipping: [node01] => (item=openshift_hosted_etcd_storage_kind) skipping: [node02] => (item=openshift_hosted_etcd_storage_kind) TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and 
ansible_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : ensure provider configuration variables are defined] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure removed web console extension variables are not set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that web console port matches API server port] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : At least one master is schedulable] ******* skipping: [node01] skipping: [node02] TASK [Detecting Operating System from ostree_booted] *************************** ok: [node02] ok: [node01] TASK [set openshift_deployment_type if unset] ********************************** skipping: [node01] skipping: [node02] TASK [check for node already bootstrapped] ************************************* ok: [node02] ok: [node01] TASK [initialize_facts set fact openshift_is_bootstrapped] ********************* ok: [node01] ok: [node02] TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] *** ok: [node01] ok: [node02] TASK [Determine Atomic Host Docker Version] ************************************ skipping: [node01] skipping: [node02] TASK [assert atomic host docker version is 1.12 or later] ********************** skipping: [node01] skipping: [node02] PLAY [Retrieve existing master configs and validate] *************************** TASK [openshift_control_plane : stat] ****************************************** ok: [node01] TASK [openshift_control_plane : slurp] ***************************************** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] *** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Initialize special first-master variables] ******************************* TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Disable web console if required] ***************************************** TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Setup yum repositories for all hosts] ************************************ TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Install Red Hat Subscription manager] ******************* skipping: [node02] TASK [rhel_subscribe : Is host already registered?] 
**************************** skipping: [node02] TASK [rhel_subscribe : Register host] ****************************************** skipping: [node02] TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] *********** skipping: [node02] TASK [rhel_subscribe : Attach to OpenShift Pool] ******************************* skipping: [node02] TASK [rhel_subscribe : Satellite preparation] ********************************** skipping: [node02] TASK [openshift_repos : openshift_repos detect ostree] ************************* ok: [node02] TASK [openshift_repos : Ensure libselinux-python is installed] ***************** ok: [node02] TASK [openshift_repos : Remove openshift_additional.repo file] ***************** ok: [node02] TASK [openshift_repos : Create any additional repos that are defined] ********** TASK [openshift_repos : include_tasks] ***************************************** skipping: [node02] TASK [openshift_repos : include_tasks] ***************************************** included: /root/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02 TASK [openshift_repos : Configure origin gpg keys] ***************************** ok: [node02] TASK [openshift_repos : Configure correct origin release repository] *********** ok: [node02] => (item=/root/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2) TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] *** changed: [node02] => { "msg": "First run of openshift_repos" } TASK [openshift_repos : Record that openshift_repos already ran] *************** ok: [node02] RUNNING HANDLER [openshift_repos : refresh cache] ****************************** changed: [node02] PLAY [Install packages necessary for installer] ******************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Determine if chrony is installed] **************************************** changed: [node02] [WARNING]: Consider using the yum, dnf or zypper module rather than running rpm. If you need to use command because yum, dnf or zypper is insufficient you can add warn=False to this command task or set command_warnings=False in ansible.cfg to get rid of this message. 
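The [WARNING] above is Ansible flagging the raw rpm call inside the "Determine if chrony is installed" task. A minimal sketch of the warn=False variant that the message itself suggests (illustrative only, not the task as it exists in openshift-ansible; it assumes an Ansible release that still accepts the warn argument on command tasks, and the registered variable name is hypothetical):

- name: Determine if chrony is installed
  command: rpm -q chrony
  args:
    warn: false              # silence the "use the yum/dnf module" hint, as the warning message suggests
  register: chrony_installed # hypothetical variable name, for illustration
  failed_when: false         # rpm exits non-zero when the package is absent; record the result instead of failing
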
TASK [Install ntp package] ***************************************************** skipping: [node02] TASK [Start and enable ntpd/chronyd] ******************************************* changed: [node02] TASK [Ensure openshift-ansible installer package deps are installed] *********** ok: [node02] => (item=iproute) ok: [node02] => (item=dbus-python) ok: [node02] => (item=PyYAML) ok: [node02] => (item=python-ipaddress) ok: [node02] => (item=libsemanage-python) ok: [node02] => (item=yum-utils) ok: [node02] => (item=python-docker) PLAY [Initialize cluster facts] ************************************************ TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [get openshift_current_version] ******************************************* ok: [node02] ok: [node01] TASK [set_fact openshift_portal_net if present on masters] ********************* ok: [node01] ok: [node02] TASK [Gather Cluster facts] **************************************************** changed: [node02] changed: [node01] TASK [Set fact of no_proxy_internal_hostnames] ********************************* skipping: [node01] skipping: [node02] TASK [Initialize openshift.node.sdn_mtu] *************************************** changed: [node02] ok: [node01] PLAY [Initialize etcd host variables] ****************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role : openshift_version] **************************************** TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** ok: [node01] TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] => { "msg": "openshift_pkg_version was not defined. 
Falling back to -3.10.0" } TASK [openshift_version : set_fact] ******************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Verify Requirements] ***************************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [Run variable sanity checks] ********************************************** ok: [node01] TASK [Validate openshift_node_groups and openshift_node_group_name] ************ ok: [node01] PLAY [Initialization Checkpoint End] ******************************************* TASK [Set install initialization 'Complete'] *********************************** ok: [node01] PLAY [Validate node hostnames] ************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Query DNS for IP address of node02] ************************************** ok: [node02] TASK [Validate openshift_hostname when defined] ******************************** skipping: [node02] TASK [Validate openshift_ip exists on node when defined] *********************** skipping: [node02] PLAY [Configure os_firewall] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [os_firewall : Detecting Atomic Host Operating System] ******************** ok: [node02] TASK [os_firewall : Set fact r_os_firewall_is_atomic] ************************** ok: [node02] TASK [os_firewall : Fail - Firewalld is not supported on Atomic Host] ********** skipping: [node02] TASK [os_firewall : Install firewalld packages] ******************************** skipping: [node02] TASK [os_firewall : Ensure iptables services are not enabled] ****************** skipping: [node02] => (item=iptables) skipping: [node02] => (item=ip6tables) TASK [os_firewall : Wait 10 seconds after disabling iptables] ****************** skipping: [node02] TASK [os_firewall : Start and enable firewalld service] ************************ skipping: [node02] TASK 
[os_firewall : need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail] *** skipping: [node02] TASK [os_firewall : Restart polkitd] ******************************************* skipping: [node02] TASK [os_firewall : Wait for polkit action to have been created] *************** skipping: [node02] TASK [os_firewall : Ensure firewalld service is not enabled] ******************* ok: [node02] TASK [os_firewall : Wait 10 seconds after disabling firewalld] ***************** skipping: [node02] TASK [os_firewall : Install iptables packages] ********************************* ok: [node02] => (item=iptables) ok: [node02] => (item=iptables-services) TASK [os_firewall : Start and enable iptables service] ************************* ok: [node02 -> node02] => (item=node02) TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] *** skipping: [node02] PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [container_runtime : Setup the docker-storage for overlay] **************** skipping: [node02] TASK [container_runtime : Create file system on extra volume device] *********** TASK [container_runtime : Create mount entry for extra volume] ***************** PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** ok: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* ok: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ****************** skipping: [node02] TASK [container_runtime : Add https_proxy to /etc/atomic.conf] ***************** skipping: [node02] TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ******************** skipping: [node02] TASK [container_runtime : Get current installed Docker version] **************** ok: [node02] TASK [container_runtime : Error out if Docker pre-installed but too old] ******* skipping: [node02] TASK [container_runtime : Error out if requested Docker is too old] ************ skipping: [node02] TASK [container_runtime : Install Docker] ************************************** skipping: [node02] TASK [container_runtime : Ensure 
docker.service.d directory exists] ************ ok: [node02] TASK [container_runtime : Configure Docker service unit file] ****************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Set registry params] ********************************* skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []}) TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] *** skipping: [node02] TASK [container_runtime : Set Proxy Settings] ********************************** skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''}) TASK [container_runtime : Set various Docker options] ************************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Configure Docker Network OPTIONS] ******************** ok: [node02] TASK [container_runtime : Detect if docker is already started] ***************** ok: [node02] TASK [container_runtime : Start the Docker service] **************************** ok: [node02] TASK [container_runtime : set_fact] ******************************************** ok: [node02] TASK [container_runtime : Check for docker_storage_path/overlay2] ************** ok: [node02] TASK [container_runtime : Fixup SELinux permissions for docker] **************** changed: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* ok: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** ok: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** ok: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] TASK [container_runtime : Fail if Atomic Host since this is an rpm request] **** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create 
systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Check that overlay is in the kernel] ***************** skipping: [node02] TASK [container_runtime : Add overlay to modprobe.d] *************************** skipping: [node02] TASK [container_runtime : Manually modprobe overlay into the kernel] *********** skipping: [node02] TASK [container_runtime : Enable and start systemd-modules-load] *************** skipping: [node02] TASK [container_runtime : Install cri-o] *************************************** skipping: [node02] TASK [container_runtime : Remove CRI-O default configuration files] ************ skipping: [node02] => (item=/etc/cni/net.d/200-loopback.conf) skipping: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf) TASK [container_runtime : Create the CRI-O configuration] ********************** skipping: [node02] TASK [container_runtime : Ensure CNI configuration directory exists] *********** skipping: [node02] TASK [container_runtime : Add iptables allow rules] **************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove iptables rules] ******************************* TASK [container_runtime : Add firewalld allow rules] *************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove firewalld allow rules] ************************ TASK [container_runtime : Configure the CNI network] *************************** skipping: [node02] TASK [container_runtime : Create /etc/sysconfig/crio-network] ****************** skipping: [node02] TASK [container_runtime : Start the CRI-O service] ***************************** skipping: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* skipping: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** skipping: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** skipping: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role : openshift_version] **************************************** TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** skipping: [node01] 
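For reference, the openshift_version output that follows cross-checks openshift_release against openshift_image_tag and openshift_pkg_version ("assert openshift_release in openshift_image_tag" ... "All assertions passed"). A minimal standalone sketch of that kind of consistency check, using the values this run reports (illustrative only, not the role's actual task file):

- name: Assert that openshift_release is contained in openshift_image_tag
  assert:
    that:
      - openshift_release in openshift_image_tag
    msg: "openshift_image_tag {{ openshift_image_tag }} must contain openshift_release {{ openshift_release }}"
  vars:
    openshift_release: "3.10"            # values as reported by the debug tasks below
    openshift_image_tag: "v3.10.0-rc.0"
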
TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Node Preparation Checkpoint Start] *************************************** TASK [Set Node preparation 'In Progress'] ************************************** ok: [node01] PLAY [Only target nodes that have not yet been bootstrapped] ******************* TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [add_host] **************************************************************** skipping: [localhost] => (item=node02) ok: [localhost] => (item=node01) PLAY [Disable excluders] ******************************************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02 TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-docker-excluder version detected] 
************ skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-docker-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-excluder version detected] ******************* skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** skipping: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] PLAY [Configure nodes] ********************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_cloud_provider : Set cloud provider facts] ********************* skipping: [node02] TASK [openshift_cloud_provider : Create cloudprovider config dir] ************** skipping: [node02] TASK [openshift_cloud_provider : include the defined cloud provider files] ***** skipping: [node02] TASK [openshift_node : fail] *************************************************** skipping: [node02] TASK [openshift_node : Check for NetworkManager service] *********************** ok: [node02] TASK [openshift_node : Set fact using_network_manager] ************************* ok: [node02] TASK [openshift_node : Install dnsmasq] **************************************** ok: [node02] TASK [openshift_node : ensure origin/node directory exists] ******************** changed: [node02] => (item=/etc/origin) 
changed: [node02] => (item=/etc/origin/node) TASK [openshift_node : Install NetworkManager during node_bootstrap provisioning] *** skipping: [node02] TASK [openshift_node : Install network manager dispatch script] **************** skipping: [node02] TASK [openshift_node : Install dnsmasq configuration] ************************** ok: [node02] TASK [openshift_node : Deploy additional dnsmasq.conf] ************************* skipping: [node02] TASK [openshift_node : Enable dnsmasq] ***************************************** ok: [node02] TASK [openshift_node : Install network manager dispatch script] **************** ok: [node02] TASK [openshift_node : Add iptables allow rules] ******************************* ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) ok: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove iptables rules] ********************************** TASK [openshift_node : Add firewalld allow rules] ****************************** skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) skipping: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove firewalld allow rules] *************************** TASK [openshift_node : Checking for journald.conf] ***************************** ok: [node02] TASK [openshift_node : Create journald persistence directories] **************** ok: [node02] TASK [openshift_node : Update journald setup] ********************************** ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'}) ok: [node02] => (item={u'var': u'Compress', u'val': True}) ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000}) ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'}) ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'}) ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'}) ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'}) ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'}) ok: [node02] => (item={u'var': u'ForwardToSyslog', 
u'val': False}) ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False}) TASK [openshift_node : Restart journald] *************************************** skipping: [node02] TASK [openshift_node : Disable swap] ******************************************* ok: [node02] TASK [openshift_node : Install node, clients, and conntrack packages] ********** ok: [node02] => (item={u'name': u'origin-node-3.10.0*'}) ok: [node02] => (item={u'name': u'origin-clients-3.10.0*'}) ok: [node02] => (item={u'name': u'conntrack-tools'}) TASK [openshift_node : Restart cri-o] ****************************************** skipping: [node02] TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] *** changed: [node02] TASK [openshift_node : sysctl] ************************************************* ok: [node02] TASK [openshift_node : Check for credentials file for registry auth] *********** skipping: [node02] TASK [openshift_node : Create credentials for registry auth] ******************* skipping: [node02] TASK [openshift_node : Create credentials for registry auth (alternative)] ***** skipping: [node02] TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] *** skipping: [node02] TASK [openshift_node : Check that node image is present] *********************** changed: [node02] TASK [openshift_node : Pre-pull node image] ************************************ skipping: [node02] TASK [openshift_node : Copy node script to the node] *************************** ok: [node02] TASK [openshift_node : Install Node service file] ****************************** ok: [node02] TASK [openshift_node : Ensure old system path is set] ************************** skipping: [node02] => (item=/etc/origin/openvswitch) skipping: [node02] => (item=/var/lib/kubelet) skipping: [node02] => (item=/opt/cni/bin) TASK [openshift_node : Check status of node image pre-pull] ******************** skipping: [node02] TASK [openshift_node : Copy node container image to ostree storage] ************ skipping: [node02] TASK [openshift_node : Install or Update node system container] **************** skipping: [node02] TASK [openshift_node : Restart network manager to ensure networking configuration is in place] *** skipping: [node02] TASK [openshift_node : Configure Node settings] ******************************** ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS='}) ok: [node02] => (item={u'regex': u'^DEBUG_LOGLEVEL=', u'line': u'DEBUG_LOGLEVEL=2'}) ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.10.0-rc.0'}) TASK [openshift_node : Configure Proxy Settings] ******************************* skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='}) skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='}) skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'}) TASK [openshift_node : file] *************************************************** skipping: [node02] TASK [openshift_node : Create the Node config] ********************************* changed: [node02] TASK [openshift_node : Configure Node Environment Variables] ******************* TASK [openshift_node : Ensure the node static pod directory exists] ************ changed: [node02] TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : Check status of node image pre-pull] 
******************** skipping: [node02] TASK [openshift_node : Install NFS storage plugin dependencies] **************** ok: [node02] TASK [openshift_node : Check for existence of nfs sebooleans] ****************** ok: [node02] => (item=virt_use_nfs) ok: [node02] => (item=virt_sandbox_use_nfs) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-27 06:27:40.983985', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.113998', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-27 06:27:40.869987', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-27 06:27:42.232192', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.017973', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-27 06:27:42.214219', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-27 06:27:40.983985', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.113998', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-27 06:27:40.869987', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-27 06:27:42.232192', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.017973', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-27 
06:27:42.214219', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install GlusterFS storage plugin dependencies] ********** ok: [node02] TASK [openshift_node : Check for existence of fusefs sebooleans] *************** ok: [node02] => (item=virt_use_fusefs) ok: [node02] => (item=virt_sandbox_use_fusefs) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-27 06:27:48.901763', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.019104', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-27 06:27:48.882659', '_ansible_ignore_errors': None, 'failed': False}) ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-27 06:27:50.042723', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.017381', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-27 06:27:50.025342', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-27 06:27:48.901763', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.019104', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-27 06:27:48.882659', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-27 06:27:50.042723', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.017381', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, 
u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-27 06:27:50.025342', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install Ceph storage plugin dependencies] *************** ok: [node02] TASK [openshift_node : Install iSCSI storage plugin dependencies] ************** ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=device-mapper-multipath) TASK [openshift_node : restart services] *************************************** ok: [node02] => (item=multipathd) ok: [node02] => (item=rpcbind) ok: [node02] => (item=iscsid) TASK [openshift_node : Template multipath configuration] *********************** changed: [node02] TASK [openshift_node : Enable and start multipath] ***************************** changed: [node02] TASK [tuned : Check for tuned package] ***************************************** ok: [node02] TASK [tuned : Set tuned OpenShift variables] *********************************** ok: [node02] TASK [tuned : Ensure directory structure exists] ******************************* ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': 
u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Ensure files are populated from templates] *********************** skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Make tuned use the 
recommended tuned profile on restart] ********* changed: [node02] => (item=/etc/tuned/active_profile) changed: [node02] => (item=/etc/tuned/profile_mode) TASK [tuned : Restart tuned service] ******************************************* changed: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] ******* ok: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] *** PLAY [node bootstrap config] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_node : install needed rpm(s)] ********************************** ok: [node02] => (item=origin-node) ok: [node02] => (item=origin-docker-excluder) ok: [node02] => (item=ansible) ok: [node02] => (item=bash-completion) ok: [node02] => (item=docker) ok: [node02] => (item=haproxy) ok: [node02] => (item=dnsmasq) ok: [node02] => (item=ntp) ok: [node02] => (item=logrotate) ok: [node02] => (item=httpd-tools) ok: [node02] => (item=bind-utils) ok: [node02] => (item=firewalld) ok: [node02] => (item=libselinux-python) ok: [node02] => (item=conntrack-tools) ok: [node02] => (item=openssl) ok: [node02] => (item=iproute) ok: [node02] => (item=python-dbus) ok: [node02] => (item=PyYAML) ok: [node02] => (item=yum-utils) ok: [node02] => (item=glusterfs-fuse) ok: [node02] => (item=device-mapper-multipath) ok: [node02] => (item=nfs-utils) ok: [node02] => (item=cockpit-ws) ok: [node02] => (item=cockpit-system) ok: [node02] => (item=cockpit-bridge) ok: [node02] => (item=cockpit-docker) ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=ceph-common) TASK [openshift_node : create the directory for node] ************************** skipping: [node02] TASK [openshift_node : laydown systemd override] ******************************* skipping: [node02] TASK [openshift_node : update the sysconfig to have necessary variables] ******* ok: [node02] => (item={u'regexp': u'^KUBECONFIG=.*', u'line': u'KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig'}) TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : disable origin-node service] **************************** changed: [node02] => (item=origin-node.service) TASK [openshift_node : Check for RPM generated config marker file .config_managed] *** ok: [node02] TASK [openshift_node : create directories for bootstrapping] ******************* ok: [node02] => (item=/root/openshift_bootstrap) changed: [node02] => (item=/var/lib/origin/openshift.local.config) changed: [node02] => (item=/var/lib/origin/openshift.local.config/node) ok: [node02] => (item=/etc/docker/certs.d/docker-registry.default.svc:5000) TASK [openshift_node : laydown the bootstrap.yml file for on boot configuration] *** ok: [node02] TASK [openshift_node : Create a symlink to the node client CA for the docker registry] *** ok: [node02] TASK [openshift_node : Remove RPM generated config files if present] *********** skipping: [node02] => (item=master) skipping: [node02] => (item=.config_managed) TASK [openshift_node : find all files in /etc/origin/node so we can remove them] *** skipping: [node02] TASK [openshift_node : Remove everything except the resolv.conf required for node] *** skipping: [node02] TASK [openshift_node_group : create node config template] ********************** changed: [node02] TASK [openshift_node_group : remove existing node config] 
********************** changed: [node02] TASK [openshift_node_group : Ensure required directories are present] ********** ok: [node02] => (item=/etc/origin/node/pods) changed: [node02] => (item=/etc/origin/node/certificates) TASK [openshift_node_group : Update the sysconfig to group "node-config-compute"] *** changed: [node02] TASK [set_fact] **************************************************************** ok: [node02] PLAY [Re-enable excluder if it was previously enabled] ************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] PLAY [Node Preparation Checkpoint End] ***************************************** TASK [Set Node preparation 'Complete'] ***************************************** ok: [node01] PLAY [Distribute bootstrap and start nodes] ************************************ TASK [openshift_node : Gather node information] ******************************** changed: [node02] ok: [node01] TASK [openshift_node : Copy master bootstrap config locally] ******************* ok: [node02] TASK [openshift_node : Distribute bootstrap kubeconfig if one does not exist] *** ok: [node01] changed: [node02] TASK [openshift_node : Start and enable node for bootstrapping] **************** changed: [node02] changed: [node01] TASK [openshift_node : Get node logs] ****************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : debug] ************************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : fail] *************************************************** skipping: [node02] skipping: [node01] PLAY [Approve any pending CSR requests from inventory nodes] ******************* TASK [Dump all candidate bootstrap hostnames] ********************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Find all hostnames for bootstrapping] ************************************ ok: [node01] TASK [Dump the 
bootstrap hostnames] ******************************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Approve bootstrap nodes] ************************************************* changed: [node01] TASK [Get CSRs] **************************************************************** skipping: [node01] TASK [Report approval errors] ************************************************** skipping: [node01] PLAY [Ensure any inventory labels are applied to the nodes] ******************** TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [openshift_manage_node : Wait for master API to become available before proceeding] *** skipping: [node02] TASK [openshift_manage_node : Wait for Node Registration] ********************** ok: [node01 -> node01] ok: [node02 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02, node01 TASK [openshift_manage_node : Set node schedulability] ************************* ok: [node01 -> node01] ok: [node02 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/set_default_node_role.yml for node02, node01 TASK [openshift_manage_node : Retrieve nodes that are marked with the infra selector or the legacy infra selector] *** ok: [node02 -> node01] TASK [openshift_manage_node : Label infra or legacy infra nodes with the new role label] *** TASK [openshift_manage_node : Retrieve non-infra, non-master nodes that are not yet labeled compute] *** ok: [node02 -> node01] TASK [openshift_manage_node : label non-master non-infra nodes compute] ******** TASK [openshift_manage_node : Label all-in-one master as a compute node] ******* skipping: [node02] PLAY RECAP ********************************************************************* localhost : ok=30 changed=0 unreachable=0 failed=0 node01 : ok=71 changed=3 unreachable=0 failed=0 node02 : ok=155 changed=33 unreachable=0 failed=0 INSTALLER STATUS *************************************************************** Initialization : Complete (0:04:24) Node Preparation : Complete (0:04:29) Sending file modes: C0755 110489328 oc Sending file modes: C0600 5649 admin.kubeconfig Cluster "node01:8443" set. Cluster "node01:8443" set. + set +e + kubectl get nodes --no-headers + cluster/kubectl.sh get nodes --no-headers node01 Ready compute,infra,master 18d v1.10.0+b81c8f8 node02 Ready compute 52s v1.10.0+b81c8f8 + kubectl_rc=0 + '[' 0 -ne 0 ']' ++ kubectl get nodes --no-headers ++ cluster/kubectl.sh get nodes --no-headers ++ grep NotReady + '[' -n '' ']' + set -e + echo 'Nodes are ready:' Nodes are ready: + kubectl get nodes + cluster/kubectl.sh get nodes NAME STATUS ROLES AGE VERSION node01 Ready compute,infra,master 18d v1.10.0+b81c8f8 node02 Ready compute 53s v1.10.0+b81c8f8 + make cluster-sync ./cluster/build.sh Building ... 
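Before cluster-sync kicks off above, the script gates on node readiness with a plain kubectl check (list nodes, then grep for NotReady). A minimal standalone sketch of that gate, assuming cluster/kubectl.sh is the provider-aware kubectl wrapper used throughout this trace:

# Sketch only: the readiness gate seen in the trace above (cluster/kubectl.sh is assumed to exist).
set +e
cluster/kubectl.sh get nodes --no-headers
kubectl_rc=$?
not_ready=$(cluster/kubectl.sh get nodes --no-headers | grep NotReady)
set -e
if [ "$kubectl_rc" -ne 0 ] || [ -n "$not_ready" ]; then
    echo 'Not all nodes are ready yet'
    exit 1
fi
echo 'Nodes are ready:'
cluster/kubectl.sh get nodes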
sha256:ceba12cbc33e4e37a707840478a630db561e2427b78c8c9f9cd6d0b73276ab32 go version go1.10 linux/amd64 go version go1.10 linux/amd64 make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh sha256:ceba12cbc33e4e37a707840478a630db561e2427b78c8c9f9cd6d0b73276ab32 go version go1.10 linux/amd64 go version go1.10 linux/amd64 find: '/root/go/src/kubevirt.io/kubevirt/_out/cmd': No such file or directory Compiling tests... compiled tests.test hack/build-docker.sh build Sending build context to Docker daemon 40.38 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> b4f3251c6468 Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> 813752072d9d Step 5/8 : USER 1001 ---> Using cache ---> 88b3556f36b4 Step 6/8 : COPY virt-controller /usr/bin/virt-controller ---> Using cache ---> e40e20bfd913 Step 7/8 : ENTRYPOINT /usr/bin/virt-controller ---> Using cache ---> 5649e7c1f141 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-controller" '' ---> Running in 2898040d5217 ---> 9e6f975ebbe2 Removing intermediate container 2898040d5217 Successfully built 9e6f975ebbe2 Sending build context to Docker daemon 43.31 MB Step 1/10 : FROM kubevirt/libvirt:4.2.0 ---> 5f0bfe81a3e0 Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 09010a005182 Step 3/10 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool net-tools sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> fc9481693838 Step 4/10 : COPY virt-launcher /usr/bin/virt-launcher ---> Using cache ---> d2f3d20792f0 Step 5/10 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> Using cache ---> 46a648a9c75c Step 6/10 : RUN setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64 ---> Using cache ---> 06dfe1cb4f54 Step 7/10 : RUN mkdir -p /usr/share/kubevirt/virt-launcher ---> Using cache ---> c807947dcb57 Step 8/10 : COPY entrypoint.sh libvirtd.sh sock-connector /usr/share/kubevirt/virt-launcher/ ---> Using cache ---> 716fcba14eed Step 9/10 : ENTRYPOINT /usr/share/kubevirt/virt-launcher/entrypoint.sh ---> Using cache ---> 869a5b2bf946 Step 10/10 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-launcher" '' ---> Running in 3272460dec3f ---> af8b0b0a413b Removing intermediate container 3272460dec3f Successfully built af8b0b0a413b Sending build context to Docker daemon 41.69 MB Step 1/5 : FROM fedora:28 ---> cc510acfcd70 Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/5 : COPY virt-handler /usr/bin/virt-handler ---> Using cache ---> d40c041d1ac0 Step 4/5 : ENTRYPOINT /usr/bin/virt-handler ---> Using cache ---> ca804d8cfab3 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-handler" '' ---> Running in fb3b60511b47 ---> f6cd87c2c16c Removing intermediate container fb3b60511b47 Successfully built f6cd87c2c16c Sending build context to Docker daemon 38.81 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api ---> Using cache ---> 3cff23506e80 Step 4/8 : WORKDIR 
/home/virt-api ---> Using cache ---> e94c5606b96b Step 5/8 : USER 1001 ---> Using cache ---> af16317199f5 Step 6/8 : COPY virt-api /usr/bin/virt-api ---> Using cache ---> fd4ef1550e4a Step 7/8 : ENTRYPOINT /usr/bin/virt-api ---> Using cache ---> e40b509c6723 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-api" '' ---> Running in 5c7bd92734da ---> 9eb3853b504b Removing intermediate container 5c7bd92734da Successfully built 9eb3853b504b Sending build context to Docker daemon 4.096 kB Step 1/7 : FROM fedora:28 ---> cc510acfcd70 Step 2/7 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/7 : ENV container docker ---> Using cache ---> aed3ca4ac3a3 Step 4/7 : RUN mkdir -p /images/custom /images/alpine && truncate -s 64M /images/custom/disk.img && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/alpine/disk.img ---> Using cache ---> c7d0cf8fc982 Step 5/7 : ADD entrypoint.sh / ---> Using cache ---> 0393e5ee0c37 Step 6/7 : CMD /entrypoint.sh ---> Using cache ---> 23798f49dea3 Step 7/7 : LABEL "disks-images-provider" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 628bfca144bf Successfully built 628bfca144bf Sending build context to Docker daemon 2.56 kB Step 1/5 : FROM fedora:28 ---> cc510acfcd70 Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/5 : ENV container docker ---> Using cache ---> aed3ca4ac3a3 Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> d8c990eaf575 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "vm-killer" '' ---> Using cache ---> 2ed275c4bfd0 Successfully built 2ed275c4bfd0 Sending build context to Docker daemon 5.12 kB Step 1/7 : FROM debian:sid ---> 68f33cf86aab Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> 50fc79ebe51c Step 3/7 : ENV container docker ---> Using cache ---> b8e063496923 Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> 8adb1572b35c Step 5/7 : ADD entry-point.sh / ---> Using cache ---> 8c0c5a52e4df Step 6/7 : CMD /entry-point.sh ---> Using cache ---> 1a4b838e5dee Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "registry-disk-v1alpha" '' ---> Using cache ---> 7aa3fd44f8c9 Successfully built 7aa3fd44f8c9 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32960/kubevirt/registry-disk-v1alpha:devel ---> 7aa3fd44f8c9 Step 2/4 : MAINTAINER "David Vossel" \ ---> Using cache ---> 5e0c3d37503b Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Using cache ---> 2acb8de4d71e Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 89f88bb54bf2 Successfully built 89f88bb54bf2 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32960/kubevirt/registry-disk-v1alpha:devel ---> 7aa3fd44f8c9 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 776bfb123af4 Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2 ---> Using cache ---> 288211d2b493 Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 0912477735f2 Successfully 
built 0912477735f2 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32960/kubevirt/registry-disk-v1alpha:devel ---> 7aa3fd44f8c9 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 776bfb123af4 Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso ---> Using cache ---> c0c8be599bed Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> f4b34e404811 Successfully built f4b34e404811 Sending build context to Docker daemon 35.59 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl ---> Using cache ---> d74088d7a4fc Step 4/8 : WORKDIR /home/virtctl ---> Using cache ---> c8c857bf8d96 Step 5/8 : USER 1001 ---> Using cache ---> 36730a67b946 Step 6/8 : COPY subresource-access-test /subresource-access-test ---> Using cache ---> 8f761bd7e61f Step 7/8 : ENTRYPOINT /subresource-access-test ---> Using cache ---> 0df79703c2d5 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "subresource-access-test" '' ---> Running in a6058c7c2f40 ---> 0e8b9e8b3e0c Removing intermediate container a6058c7c2f40 Successfully built 0e8b9e8b3e0c Sending build context to Docker daemon 3.072 kB Step 1/9 : FROM fedora:28 ---> cc510acfcd70 Step 2/9 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/9 : ENV container docker ---> Using cache ---> aed3ca4ac3a3 Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all ---> Using cache ---> 6050b24a5d85 Step 5/9 : ENV GIMME_GO_VERSION 1.9.2 ---> Using cache ---> 0447d2178073 Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> 291db82d955f Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 793556477837 Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli ---> Using cache ---> fd5c6e1f9461 Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "winrmcli" '' ---> Using cache ---> 91d1be1bcbe4 Successfully built 91d1be1bcbe4 Sending build context to Docker daemon 36.79 MB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 71a8c548e503 Step 3/5 : COPY example-hook-sidecar /example-hook-sidecar ---> Using cache ---> ccb584c87bb9 Step 4/5 : ENTRYPOINT /example-hook-sidecar ---> Using cache ---> 88c6de6e8bc7 Step 5/5 : LABEL "example-hook-sidecar" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Running in 5368bfb06744 ---> 2cf879e7f52f Removing intermediate container 5368bfb06744 Successfully built 2cf879e7f52f hack/build-docker.sh push The push refers to a repository [localhost:32960/kubevirt/virt-controller] 40b121e65161: Preparing b2f5abdac324: Preparing 891e1e4ef82a: Preparing b2f5abdac324: Pushed 40b121e65161: Pushed 891e1e4ef82a: Pushed devel: digest: sha256:c4b5b9bc113f5f6be151cc8c53a1ecb35c25b745dcc8e092a1e275ce534415ab size: 949 The push refers to a repository [localhost:32960/kubevirt/virt-launcher] e3173fa271ab: Preparing 7fe2a4444322: Preparing 8cf71f00028c: Preparing cdd674c84e53: Preparing 812cecc2c1a7: Preparing 0b99c4111657: Preparing da38cf808aa5: Preparing b83399358a92: Preparing 186d8b3e4fd8: Preparing 0b99c4111657: Waiting 
da38cf808aa5: Waiting b83399358a92: Waiting fa6154170bf5: Preparing 5eefb9960a36: Preparing 891e1e4ef82a: Preparing fa6154170bf5: Waiting 186d8b3e4fd8: Waiting 5eefb9960a36: Waiting 891e1e4ef82a: Waiting 7fe2a4444322: Pushed cdd674c84e53: Pushed e3173fa271ab: Pushed da38cf808aa5: Pushed b83399358a92: Pushed 186d8b3e4fd8: Pushed fa6154170bf5: Pushed 891e1e4ef82a: Mounted from kubevirt/virt-controller 8cf71f00028c: Pushed 0b99c4111657: Pushed 812cecc2c1a7: Pushed 5eefb9960a36: Pushed devel: digest: sha256:83e22692ca4898632fea07e31f34122dfc9e9184848589f2f3e76074e1031103 size: 2828 The push refers to a repository [localhost:32960/kubevirt/virt-handler] 41cb357213b4: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-launcher 41cb357213b4: Pushed devel: digest: sha256:1562b0a333fa322934179783fb772d369761d79557920dc423081b0499a8d4e6 size: 741 The push refers to a repository [localhost:32960/kubevirt/virt-api] ba33c524fd16: Preparing afd1d781e4d1: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-handler afd1d781e4d1: Pushed ba33c524fd16: Pushed devel: digest: sha256:e2314903a3efe0d9d46c505a6e2afb26634cc64b77eb7d8498fb9d199ee2bb6b size: 948 The push refers to a repository [localhost:32960/kubevirt/disks-images-provider] dc0875c44573: Preparing 8fc77a44094f: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-api dc0875c44573: Pushed 8fc77a44094f: Pushed devel: digest: sha256:d23d8d42ec6e15ae7ed6e778918aafb30b1527dcab703a192077860ecf796c74 size: 948 The push refers to a repository [localhost:32960/kubevirt/vm-killer] d1b69e768421: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/disks-images-provider d1b69e768421: Pushed devel: digest: sha256:e18b0719b6c92415bd3a9d4e45278bb4a4f7bccefbd3fe8c958aad9b913bc32c size: 740 The push refers to a repository [localhost:32960/kubevirt/registry-disk-v1alpha] 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 2a15632f54d4: Pushed 91a924e03d7c: Pushed 25edbec0eaea: Pushed devel: digest: sha256:93dbd4b6c598eae77e68f8119e129d092b75cfe0573a46c653a4578391b54edd size: 948 The push refers to a repository [localhost:32960/kubevirt/cirros-registry-disk-demo] f287bddc58c9: Preparing 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 2a15632f54d4: Mounted from kubevirt/registry-disk-v1alpha 91a924e03d7c: Mounted from kubevirt/registry-disk-v1alpha 25edbec0eaea: Mounted from kubevirt/registry-disk-v1alpha f287bddc58c9: Pushed devel: digest: sha256:d84ec6e1c3b1e790318b351a867571430b0f77022b609bf72c7edc11774869a2 size: 1160 The push refers to a repository [localhost:32960/kubevirt/fedora-cloud-registry-disk-demo] 191bddb21627: Preparing 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 91a924e03d7c: Mounted from kubevirt/cirros-registry-disk-demo 25edbec0eaea: Mounted from kubevirt/cirros-registry-disk-demo 2a15632f54d4: Mounted from kubevirt/cirros-registry-disk-demo 191bddb21627: Pushed devel: digest: sha256:721c5dc3b73e50b865b6d395e48884382c391509e18b4d77a3a27456a1eea65c size: 1161 The push refers to a repository [localhost:32960/kubevirt/alpine-registry-disk-demo] 8a362b640dc9: Preparing 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 2a15632f54d4: Mounted from kubevirt/fedora-cloud-registry-disk-demo 25edbec0eaea: Mounted from kubevirt/fedora-cloud-registry-disk-demo 91a924e03d7c: Mounted from kubevirt/fedora-cloud-registry-disk-demo 8a362b640dc9: Pushed devel: digest: 
sha256:6c9639e0cb8ed67572ed78aad285cce752608f39802ce49856474162feae16f5 size: 1160 The push refers to a repository [localhost:32960/kubevirt/subresource-access-test] aa7efe7716c1: Preparing 4052ce9d0aff: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/vm-killer 4052ce9d0aff: Pushed aa7efe7716c1: Pushed devel: digest: sha256:050c115d88adb35d9ee2030fba9f148bf80b4de7153c358a2cf04236b3154969 size: 948 The push refers to a repository [localhost:32960/kubevirt/winrmcli] 64ccc7ac4271: Preparing 4242962b50c3: Preparing 0e374d8c733e: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/subresource-access-test 64ccc7ac4271: Pushed 0e374d8c733e: Pushed 4242962b50c3: Pushed devel: digest: sha256:7ba212e34e7bbac39ae9d54624462c338a98987d0eb9f59f8bb24b123847d8b4 size: 1165 The push refers to a repository [localhost:32960/kubevirt/example-hook-sidecar] dd3b7aae9cec: Preparing 39bae602f753: Preparing dd3b7aae9cec: Pushed 39bae602f753: Pushed devel: digest: sha256:11d5b6d78b98ddd843ddd246e64ba561a8aa4efeaed6cb5f32f7e4af38a31af1 size: 740 make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' Done ./cluster/clean.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-138-g319cb65 ++ KUBEVIRT_VERSION=v0.7.0-138-g319cb65 + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig 
manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32960/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Cleaning up ...' Cleaning up ... + cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers + grep foregroundDeleteVirtualMachine + read p error: the server doesn't have a resource type "vmis" + _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0 No resources found + namespaces=(default ${namespace}) + for i in '${namespaces[@]}' + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete deployment -l kubevirt.io No resources found + _kubectl -n default delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rs -l kubevirt.io No resources found + _kubectl -n default delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete services -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete 
validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n default delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete secrets -l kubevirt.io No resources found + _kubectl -n default delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pv -l kubevirt.io No resources found + _kubectl -n default delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pvc -l kubevirt.io No resources found + _kubectl -n default delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete ds -l kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n default delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pods -l kubevirt.io No resources found + _kubectl -n default delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n default delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rolebinding -l kubevirt.io No resources found + _kubectl -n default delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete roles -l kubevirt.io No resources found + _kubectl -n default delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterroles -l kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + for i in '${namespaces[@]}' + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n 
kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete services -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n kube-system delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete secrets -l kubevirt.io No resources found + _kubectl -n kube-system delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pv -l kubevirt.io No resources found + _kubectl -n kube-system delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pvc -l kubevirt.io No resources found + _kubectl -n kube-system delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete ds -l kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n kube-system delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pods -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete roles -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterroles -l kubevirt.io No resources found + _kubectl -n 
kube-system delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + sleep 2 + echo Done Done ./cluster/deploy.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-138-g319cb65 ++ KUBEVIRT_VERSION=v0.7.0-138-g319cb65 + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo 
images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32960/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Deploying ...' Deploying ... + [[ -z openshift-3.10-release ]] + [[ openshift-3.10-release =~ .*-dev ]] + [[ openshift-3.10-release =~ .*-release ]] + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]] + continue + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]] + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml clusterrole.rbac.authorization.k8s.io "kubevirt.io:admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:edit" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:view" created serviceaccount "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver-auth-delegator" created rolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created role.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-controller" created serviceaccount "kubevirt-controller" created serviceaccount "kubevirt-privileged" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller-cluster-admin" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-privileged-cluster-admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:default" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt.io:default" created service "virt-api" created deployment.extensions "virt-api" created deployment.extensions "virt-controller" created daemonset.extensions "virt-handler" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstances.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancereplicasets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io 
"virtualmachineinstancepresets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachines.kubevirt.io" created + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R persistentvolumeclaim "disk-alpine" created persistentvolume "host-path-disk-alpine" created persistentvolumeclaim "disk-custom" created persistentvolume "host-path-disk-custom" created daemonset.extensions "disks-images-provider" created serviceaccount "kubevirt-testing" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-testing-cluster-admin" created + [[ os-3.10.0 =~ os-* ]] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"] + _kubectl adm policy add-scc-to-user privileged admin + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged admin scc "privileged" added to: ["admin"] + echo Done Done + namespaces=(kube-system default) + [[ kube-system != \k\u\b\e\-\s\y\s\t\e\m ]] + timeout=300 + sample=30 + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n 'disks-images-provider-vs2cw 0/1 ContainerCreating 0 2s disks-images-provider-xjp4h 0/1 ContainerCreating 0 2s virt-api-7d79764579-qkfk6 0/1 ContainerCreating 0 9s virt-api-7d79764579-vhlzj 0/1 ContainerCreating 0 10s virt-controller-7d57d96b65-2wdgk 0/1 ContainerCreating 0 9s virt-controller-7d57d96b65-ghzq5 0/1 ContainerCreating 0 9s virt-handler-45mmt 0/1 ContainerCreating 0 10s virt-handler-d6ndr 0/1 ContainerCreating 0 10s' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... 
+ kubectl get pods -n kube-system --no-headers + cluster/kubectl.sh get pods -n kube-system --no-headers + grep -v Running disks-images-provider-vs2cw 0/1 ContainerCreating 0 2s disks-images-provider-xjp4h 0/1 ContainerCreating 0 2s virt-api-7d79764579-qkfk6 0/1 ContainerCreating 0 9s virt-api-7d79764579-vhlzj 0/1 ContainerCreating 0 10s virt-controller-7d57d96b65-2wdgk 0/1 ContainerCreating 0 9s virt-controller-7d57d96b65-ghzq5 0/1 ContainerCreating 0 9s virt-handler-45mmt 0/1 ContainerCreating 0 10s virt-handler-d6ndr 0/1 ContainerCreating 0 10s + sleep 30 + current_time=30 + '[' 30 -gt 300 ']' ++ kubectl get pods -n kube-system --no-headers ++ grep -v Running ++ cluster/kubectl.sh get pods -n kube-system --no-headers + '[' -n '' ']' + current_time=0 ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + '[' -n '' ']' + kubectl get pods -n kube-system + cluster/kubectl.sh get pods -n kube-system NAME READY STATUS RESTARTS AGE disks-images-provider-vs2cw 1/1 Running 0 36s disks-images-provider-xjp4h 1/1 Running 0 36s master-api-node01 1/1 Running 1 18d master-controllers-node01 1/1 Running 1 18d master-etcd-node01 1/1 Running 1 18d virt-api-7d79764579-qkfk6 1/1 Running 0 43s virt-api-7d79764579-vhlzj 1/1 Running 1 44s virt-controller-7d57d96b65-2wdgk 1/1 Running 0 43s virt-controller-7d57d96b65-ghzq5 1/1 Running 0 43s virt-handler-45mmt 1/1 Running 0 44s virt-handler-d6ndr 1/1 Running 0 44s + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n default --no-headers ++ grep -v Running ++ cluster/kubectl.sh get pods -n default --no-headers + '[' -n '' ']' + current_time=0 ++ kubectl get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false ++ cluster/kubectl.sh get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + '[' -n '' ']' + kubectl get pods -n default + cluster/kubectl.sh get pods -n default NAME READY STATUS RESTARTS AGE docker-registry-1-rl562 1/1 Running 2 18d registry-console-1-rw9zf 1/1 Running 2 18d router-1-6cch9 1/1 Running 1 18d + kubectl version + cluster/kubectl.sh version oc v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 features: Basic-Auth GSSAPI Kerberos SPNEGO Server https://127.0.0.1:32957 openshift v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 + ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + [[ openshift-3.10-release =~ windows.* ]] + FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + make functest hack/dockerized "hack/build-func-tests.sh" sha256:ceba12cbc33e4e37a707840478a630db561e2427b78c8c9f9cd6d0b73276ab32 go version go1.10 linux/amd64 Waiting for rsyncd to be ready. go version go1.10 linux/amd64 Compiling tests... 
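The functional-test step itself reduces to exporting the ginkgo arguments assembled above and invoking the functest target, which in turn runs hack/dockerized "hack/build-func-tests.sh" and then hack/functests.sh. A sketch of reproducing that step by hand (the provider name and junit path are taken from this trace; anything beyond that is an assumption):

# Sketch: re-run the functional tests the way this job does, from a kubevirt checkout.
export KUBEVIRT_PROVIDER=os-3.10.0
export FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml'
make functest    # builds the test binary via hack/dockerized, then executes hack/functests.sh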
compiled tests.test hack/functests.sh Running Suite: Tests Suite ========================== Random Seed: 1532673560 Will run 148 of 148 specs • Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running level=info timestamp=2018-07-27T06:41:37.118057Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:41:40.555255Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:41:41.024710Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 06:41:45 http: TLS handshake error from 10.129.0.1:42700: EOF level=info timestamp=2018-07-27T06:41:51.066564Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:41:54.696799Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 06:41:55 http: TLS handshake error from 10.129.0.1:42708: EOF level=info timestamp=2018-07-27T06:41:56.775605Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:42:01.115311Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 06:42:05 http: TLS handshake error from 10.129.0.1:42716: EOF level=info timestamp=2018-07-27T06:42:07.183431Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:42:10.606059Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:42:11.163978Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 06:42:15 http: TLS handshake error from 10.129.0.1:42724: EOF level=info timestamp=2018-07-27T06:42:21.205863Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 06:39:59 http: TLS handshake error from 10.129.0.1:46850: EOF 2018/07/27 06:40:09 http: TLS handshake error from 10.129.0.1:46858: EOF 2018/07/27 06:40:19 http: TLS handshake error from 10.129.0.1:46866: EOF 2018/07/27 06:40:29 http: TLS 
handshake error from 10.129.0.1:46874: EOF 2018/07/27 06:40:39 http: TLS handshake error from 10.129.0.1:46882: EOF 2018/07/27 06:40:49 http: TLS handshake error from 10.129.0.1:46890: EOF 2018/07/27 06:40:59 http: TLS handshake error from 10.129.0.1:46898: EOF 2018/07/27 06:41:09 http: TLS handshake error from 10.129.0.1:46906: EOF 2018/07/27 06:41:19 http: TLS handshake error from 10.129.0.1:46914: EOF 2018/07/27 06:41:29 http: TLS handshake error from 10.129.0.1:46922: EOF 2018/07/27 06:41:39 http: TLS handshake error from 10.129.0.1:46930: EOF 2018/07/27 06:41:49 http: TLS handshake error from 10.129.0.1:46938: EOF 2018/07/27 06:41:59 http: TLS handshake error from 10.129.0.1:46946: EOF 2018/07/27 06:42:09 http: TLS handshake error from 10.129.0.1:46954: EOF 2018/07/27 06:42:19 http: TLS handshake error from 10.129.0.1:46962: EOF Pod name: virt-controller-7d57d96b65-2wdgk Pod phase: Running level=info timestamp=2018-07-27T06:38:07.055925Z pos=virtinformers.go:104 component=virt-controller service=http msg="STARTING informer vmirsInformer" level=info timestamp=2018-07-27T06:38:07.055941Z pos=virtinformers.go:104 component=virt-controller service=http msg="STARTING informer configMapInformer" level=info timestamp=2018-07-27T06:38:07.055955Z pos=virtinformers.go:104 component=virt-controller service=http msg="STARTING informer vmInformer" level=info timestamp=2018-07-27T06:38:07.055969Z pos=virtinformers.go:104 component=virt-controller service=http msg="STARTING informer vmiInformer" level=info timestamp=2018-07-27T06:38:07.056059Z pos=vm.go:85 component=virt-controller service=http msg="Starting VirtualMachine controller." level=info timestamp=2018-07-27T06:38:07.061975Z pos=node.go:104 component=virt-controller service=http msg="Starting node controller." level=info timestamp=2018-07-27T06:38:07.062035Z pos=vmi.go:129 component=virt-controller service=http msg="Starting vmi controller." level=info timestamp=2018-07-27T06:38:07.062073Z pos=replicaset.go:111 component=virt-controller service=http msg="Starting VirtualMachineInstanceReplicaSet controller." level=info timestamp=2018-07-27T06:38:07.063331Z pos=preset.go:71 component=virt-controller service=http msg="Starting Virtual Machine Initializer." 
level=info timestamp=2018-07-27T06:39:23.681874Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitwllm kind= uid=cce28920-9167-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:39:23.726891Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitwllm kind= uid=cce28920-9167-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:39:23.932068Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitwllm\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitwllm" level=info timestamp=2018-07-27T06:39:24.123778Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiq88fh kind= uid=cd44c630-9167-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:39:24.123931Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiq88fh kind= uid=cd44c630-9167-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:39:25.790400Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiq88fh\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiq88fh" Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T06:38:08.369671Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T06:38:08.724889Z pos=virt-handler.go:87 component=virt-handler hostname=node02 level=info timestamp=2018-07-27T06:38:08.735035Z pos=vm.go:210 component=virt-handler msg="Starting virt-handler controller." level=info timestamp=2018-07-27T06:38:08.736817Z pos=cache.go:151 component=virt-handler msg="Synchronizing domains" level=info timestamp=2018-07-27T06:38:08.840378Z pos=device_controller.go:133 component=virt-handler msg="Starting device plugin controller" level=info timestamp=2018-07-27T06:38:08.868589Z pos=device_controller.go:127 component=virt-handler msg="tun device plugin started" level=info timestamp=2018-07-27T06:38:08.874696Z pos=device_controller.go:127 component=virt-handler msg="kvm device plugin started" Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T06:38:10.599690Z pos=virt-handler.go:87 component=virt-handler hostname=node01 level=info timestamp=2018-07-27T06:38:10.614065Z pos=vm.go:210 component=virt-handler msg="Starting virt-handler controller." 
level=info timestamp=2018-07-27T06:38:10.617285Z pos=cache.go:151 component=virt-handler msg="Synchronizing domains" level=info timestamp=2018-07-27T06:38:10.758019Z pos=device_controller.go:133 component=virt-handler msg="Starting device plugin controller" level=info timestamp=2018-07-27T06:38:11.216280Z pos=device_controller.go:127 component=virt-handler msg="tun device plugin started" level=info timestamp=2018-07-27T06:38:11.218568Z pos=device_controller.go:127 component=virt-handler msg="kvm device plugin started" Pod name: virt-launcher-testvmiq88fh-zwkhr Pod phase: Running level=info timestamp=2018-07-27T06:39:33.160065Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T06:39:33.160373Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T06:39:33.163733Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T06:39:45.946711Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T06:39:46.015942Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmiq88fh" level=info timestamp=2018-07-27T06:39:46.019164Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T06:39:46.019380Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" ------------------------------ • Failure [180.788 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with 3 CPU cores /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:56 should report 3 cpu cores under guest OS [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:62 Timed out after 90.004s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 ------------------------------ STEP: Starting a VirtualMachineInstance level=info timestamp=2018-07-27T06:39:26.652747Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmiq88fh-zwkhr" Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running 2018/07/27 06:44:45 http: TLS handshake error from 10.129.0.1:42846: EOF level=info timestamp=2018-07-27T06:44:52.025401Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:44:54.693315Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 06:44:55 http: TLS handshake error from 10.129.0.1:42854: EOF level=info timestamp=2018-07-27T06:44:57.233835Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:45:02.077107Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 06:45:05 http: TLS handshake error from 10.129.0.1:42862: EOF level=info timestamp=2018-07-27T06:45:07.647484Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:45:08.140726Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T06:45:08.142039Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T06:45:10.894296Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:45:12.127889Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 06:45:15 http: TLS handshake error from 10.129.0.1:42870: EOF level=info timestamp=2018-07-27T06:45:22.167891Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T06:45:24.729511Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 06:43:19 http: TLS handshake error from 10.129.0.1:47012: EOF level=info timestamp=2018-07-27T06:43:24.699549Z pos=filter.go:46 component=virt-api 
remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 06:43:29 http: TLS handshake error from 10.129.0.1:47020: EOF 2018/07/27 06:43:39 http: TLS handshake error from 10.129.0.1:47028: EOF 2018/07/27 06:43:49 http: TLS handshake error from 10.129.0.1:47036: EOF 2018/07/27 06:43:59 http: TLS handshake error from 10.129.0.1:47044: EOF 2018/07/27 06:44:09 http: TLS handshake error from 10.129.0.1:47052: EOF 2018/07/27 06:44:19 http: TLS handshake error from 10.129.0.1:47060: EOF level=info timestamp=2018-07-27T06:44:24.724766Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 06:44:29 http: TLS handshake error from 10.129.0.1:47068: EOF 2018/07/27 06:44:39 http: TLS handshake error from 10.129.0.1:47076: EOF 2018/07/27 06:44:49 http: TLS handshake error from 10.129.0.1:47084: EOF 2018/07/27 06:44:59 http: TLS handshake error from 10.129.0.1:47092: EOF 2018/07/27 06:45:09 http: TLS handshake error from 10.129.0.1:47100: EOF 2018/07/27 06:45:19 http: TLS handshake error from 10.129.0.1:47108: EOF Pod name: virt-controller-7d57d96b65-2wdgk Pod phase: Running level=info timestamp=2018-07-27T06:38:07.055955Z pos=virtinformers.go:104 component=virt-controller service=http msg="STARTING informer vmInformer" level=info timestamp=2018-07-27T06:38:07.055969Z pos=virtinformers.go:104 component=virt-controller service=http msg="STARTING informer vmiInformer" level=info timestamp=2018-07-27T06:38:07.056059Z pos=vm.go:85 component=virt-controller service=http msg="Starting VirtualMachine controller." level=info timestamp=2018-07-27T06:38:07.061975Z pos=node.go:104 component=virt-controller service=http msg="Starting node controller." level=info timestamp=2018-07-27T06:38:07.062035Z pos=vmi.go:129 component=virt-controller service=http msg="Starting vmi controller." level=info timestamp=2018-07-27T06:38:07.062073Z pos=replicaset.go:111 component=virt-controller service=http msg="Starting VirtualMachineInstanceReplicaSet controller." level=info timestamp=2018-07-27T06:38:07.063331Z pos=preset.go:71 component=virt-controller service=http msg="Starting Virtual Machine Initializer." 
level=info timestamp=2018-07-27T06:39:23.681874Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitwllm kind= uid=cce28920-9167-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:39:23.726891Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitwllm kind= uid=cce28920-9167-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:39:23.932068Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitwllm\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitwllm" level=info timestamp=2018-07-27T06:39:24.123778Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiq88fh kind= uid=cd44c630-9167-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:39:24.123931Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiq88fh kind= uid=cd44c630-9167-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:39:25.790400Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiq88fh\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiq88fh" level=info timestamp=2018-07-27T06:42:24.940705Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi422tm kind= uid=39072e40-9168-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:42:24.940926Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi422tm kind= uid=39072e40-9168-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T06:38:08.369671Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T06:38:08.724889Z pos=virt-handler.go:87 component=virt-handler hostname=node02 level=info timestamp=2018-07-27T06:38:08.735035Z pos=vm.go:210 component=virt-handler msg="Starting virt-handler controller." level=info timestamp=2018-07-27T06:38:08.736817Z pos=cache.go:151 component=virt-handler msg="Synchronizing domains" level=info timestamp=2018-07-27T06:38:08.840378Z pos=device_controller.go:133 component=virt-handler msg="Starting device plugin controller" level=info timestamp=2018-07-27T06:38:08.868589Z pos=device_controller.go:127 component=virt-handler msg="tun device plugin started" level=info timestamp=2018-07-27T06:38:08.874696Z pos=device_controller.go:127 component=virt-handler msg="kvm device plugin started" Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T06:38:10.599690Z pos=virt-handler.go:87 component=virt-handler hostname=node01 level=info timestamp=2018-07-27T06:38:10.614065Z pos=vm.go:210 component=virt-handler msg="Starting virt-handler controller." 
level=info timestamp=2018-07-27T06:38:10.617285Z pos=cache.go:151 component=virt-handler msg="Synchronizing domains" level=info timestamp=2018-07-27T06:38:10.758019Z pos=device_controller.go:133 component=virt-handler msg="Starting device plugin controller" level=info timestamp=2018-07-27T06:38:11.216280Z pos=device_controller.go:127 component=virt-handler msg="tun device plugin started" level=info timestamp=2018-07-27T06:38:11.218568Z pos=device_controller.go:127 component=virt-handler msg="kvm device plugin started" Pod name: virt-launcher-testvmi422tm-xvcxl Pod phase: Running level=info timestamp=2018-07-27T06:42:33.366503Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T06:42:33.367481Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T06:42:33.370155Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T06:42:43.458258Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T06:42:43.527347Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmi422tm" level=info timestamp=2018-07-27T06:42:43.529433Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T06:42:43.529654Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" • Failure [180.490 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108 should consume hugepages /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 hugepages-2Mi [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.005s. 
Timed out waiting for VMI to enter Running phase
Expected
    : false
to equal
    : true
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049
------------------------------
STEP: Starting a VM
level=info timestamp=2018-07-27T06:42:25.851029Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmi422tm-xvcxl"
S [SKIPPING] [0.220 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with hugepages
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108
      should consume hugepages
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        hugepages-1Gi [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

        No node with hugepages hugepages-1Gi capacity

        /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:160
------------------------------
•
------------------------------
• [SLOW TEST:122.701 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238
    when CPU model defined
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:284
      should report defined CPU model
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:285
------------------------------
2018/07/27 02:47:30 read closing down: EOF
2018/07/27 02:49:35 read closing down: EOF
• [SLOW TEST:125.939 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238
    when CPU model equals to passthrough
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:312
      should report exactly the same model as node CPU
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:313
------------------------------
• [SLOW TEST:121.380 seconds]
2018/07/27 02:51:37 read closing down: EOF
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238
    when CPU model not defined
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:336
      should report CPU model from libvirt capabilities
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:337
------------------------------
2018/07/27 02:52:29 read closing down: EOF
• [SLOW TEST:52.490 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  New VirtualMachineInstance with all supported drives
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:357
    should have all the device nodes
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:380
------------------------------
• [SLOW TEST:48.850 seconds]
CloudInit UserData
2018/07/27 02:53:18 read closing down: EOF
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80
    with cloudInitNoCloud userDataBase64 source
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81
      should have cloud-init data
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:82
------------------------------
• [SLOW TEST:174.385 seconds]
CloudInit UserData
2018/07/27 02:56:13 read closing down: EOF
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userDataBase64 source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81 with injected ssh-key /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:92 should have ssh-key under authorized keys /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:93 ------------------------------ 2018/07/27 02:57:02 read closing down: EOF • [SLOW TEST:60.059 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userData source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:118 should process provided cloud-init data /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:119 ------------------------------ 2018/07/27 02:57:13 read closing down: EOF 2018/07/27 02:58:00 read closing down: EOF • [SLOW TEST:47.858 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 should take user-data from k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:162 ------------------------------ Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running level=info timestamp=2018-07-27T07:00:24.219745Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:00:25 http: TLS handshake error from 10.129.0.1:43626: EOF level=info timestamp=2018-07-27T07:00:25.281505Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:00:35 http: TLS handshake error from 10.129.0.1:43634: EOF level=info timestamp=2018-07-27T07:00:35.336660Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:00:45 http: TLS handshake error from 10.129.0.1:43642: EOF level=info timestamp=2018-07-27T07:00:45.387005Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:00:50.632766Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:00:51.728669Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:00:54.267821Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:00:55 http: TLS handshake error from 10.129.0.1:43650: EOF level=info 
timestamp=2018-07-27T07:00:55.435006Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:00:58.256916Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:00:58.269958Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:00:58.282308Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 06:59:19 http: TLS handshake error from 10.129.0.1:47808: EOF 2018/07/27 06:59:29 http: TLS handshake error from 10.129.0.1:47816: EOF 2018/07/27 06:59:39 http: TLS handshake error from 10.129.0.1:47824: EOF 2018/07/27 06:59:49 http: TLS handshake error from 10.129.0.1:47832: EOF level=info timestamp=2018-07-27T06:59:53.475706Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T06:59:54.003199Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 06:59:59 http: TLS handshake error from 10.129.0.1:47840: EOF level=info timestamp=2018-07-27T07:00:06.834679Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:00:09 http: TLS handshake error from 10.129.0.1:47848: EOF 2018/07/27 07:00:19 http: TLS handshake error from 10.129.0.1:47856: EOF 2018/07/27 07:00:29 http: TLS handshake error from 10.129.0.1:47864: EOF level=info timestamp=2018-07-27T07:00:36.813737Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:00:39 http: TLS handshake error from 10.129.0.1:47872: EOF 2018/07/27 07:00:49 http: TLS handshake error from 10.129.0.1:47880: EOF 2018/07/27 07:00:59 http: TLS handshake error from 10.129.0.1:47890: EOF Pod name: virt-controller-7d57d96b65-2wdgk Pod phase: Running level=info timestamp=2018-07-27T06:52:29.223781Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigx6mn kind= uid=a1397eb9-9169-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:52:29.223968Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigx6mn kind= uid=a1397eb9-9169-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:53:18.064315Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmic6b5b kind= uid=be563397-9169-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:53:18.064495Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmic6b5b kind= uid=be563397-9169-11e8-96ba-525500d15501 
msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:56:12.439995Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmipmzsh kind= uid=2645ff33-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:56:12.440223Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmipmzsh kind= uid=2645ff33-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:56:12.518168Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmipmzsh\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmipmzsh" level=info timestamp=2018-07-27T06:56:12.531940Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmipmzsh\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmipmzsh" level=info timestamp=2018-07-27T06:57:12.516718Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:57:12.516882Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:57:12.580548Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5zrf\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5zrf" level=info timestamp=2018-07-27T06:57:12.602518Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5zrf\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5zrf" level=info timestamp=2018-07-27T06:58:00.353142Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7pbkp kind= uid=66982e18-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:58:00.353287Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7pbkp kind= uid=66982e18-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:58:00.460996Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7pbkp\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7pbkp" Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T06:38:08.369671Z pos=application.go:174 component=virt-controller 
service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T06:58:00.164014Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T06:58:00.164564Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmil5zrf" level=info timestamp=2018-07-27T06:58:00.373136Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.373249Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T06:58:00.373276Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T06:58:00.373365Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T06:58:00.373402Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.373473Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T06:58:00.373532Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:58:00.373608Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.375136Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T06:58:00.722249Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:58:00.722375Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.722516Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:58:00.722610Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T06:51:37.735981Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-27T06:51:37.736172Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.736374Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=error timestamp=2018-07-27T06:51:37.756390Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind= uid=44a0f0ff-9169-11e8-96ba-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-27T06:51:37.756598Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmifqlmx" level=info timestamp=2018-07-27T06:51:37.756947Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.757139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.766911Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.766992Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.805480Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.805731Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:38.159890Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:38.160031Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-27T06:51:38.160136Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:38.160199Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmi7pbkp-bc2vc Pod phase: Running level=info timestamp=2018-07-27T06:58:06.167968Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T06:58:06.168831Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[OnDefineDomain:[0xc4200bb800]]" level=info timestamp=2018-07-27T06:58:06.171184Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T06:58:16.178188Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T06:58:16.228308Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmi7pbkp" level=info timestamp=2018-07-27T06:58:16.229966Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T06:58:16.230229Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" • Failure [180.457 seconds] HookSidecars /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40 VMI definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58 with SM BIOS hook sidecar /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59 should successfully start with hook sidecar annotation [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:60 Timed out after 90.005s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 ------------------------------ STEP: Starting a VMI level=info timestamp=2018-07-27T06:58:01.338148Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmi7pbkp-bc2vc" Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running 2018/07/27 07:03:25 http: TLS handshake error from 10.129.0.1:43772: EOF level=info timestamp=2018-07-27T07:03:26.341173Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:03:35 http: TLS handshake error from 10.129.0.1:43780: EOF level=info timestamp=2018-07-27T07:03:36.384038Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:03:36.844924Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:03:45 http: TLS handshake error from 10.129.0.1:43788: EOF level=info timestamp=2018-07-27T07:03:46.440311Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:03:51.023285Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:03:52.035715Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:03:54.609745Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:03:55 http: TLS handshake error from 10.129.0.1:43796: EOF level=info timestamp=2018-07-27T07:03:56.505854Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:03:58.879880Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:03:58.895902Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:03:58.912234Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 07:01:59 http: TLS handshake error from 
10.129.0.1:47938: EOF level=info timestamp=2018-07-27T07:02:06.842686Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:02:09 http: TLS handshake error from 10.129.0.1:47946: EOF 2018/07/27 07:02:19 http: TLS handshake error from 10.129.0.1:47954: EOF 2018/07/27 07:02:29 http: TLS handshake error from 10.129.0.1:47962: EOF 2018/07/27 07:02:39 http: TLS handshake error from 10.129.0.1:47970: EOF 2018/07/27 07:02:49 http: TLS handshake error from 10.129.0.1:47978: EOF 2018/07/27 07:02:59 http: TLS handshake error from 10.129.0.1:47986: EOF level=info timestamp=2018-07-27T07:03:06.885528Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:03:09 http: TLS handshake error from 10.129.0.1:47994: EOF 2018/07/27 07:03:19 http: TLS handshake error from 10.129.0.1:48002: EOF 2018/07/27 07:03:29 http: TLS handshake error from 10.129.0.1:48010: EOF 2018/07/27 07:03:39 http: TLS handshake error from 10.129.0.1:48018: EOF 2018/07/27 07:03:49 http: TLS handshake error from 10.129.0.1:48026: EOF 2018/07/27 07:03:59 http: TLS handshake error from 10.129.0.1:48036: EOF Pod name: virt-controller-7d57d96b65-2wdgk Pod phase: Running level=info timestamp=2018-07-27T06:53:18.064495Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmic6b5b kind= uid=be563397-9169-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:56:12.439995Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmipmzsh kind= uid=2645ff33-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:56:12.440223Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmipmzsh kind= uid=2645ff33-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:56:12.518168Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmipmzsh\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmipmzsh" level=info timestamp=2018-07-27T06:56:12.531940Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmipmzsh\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmipmzsh" level=info timestamp=2018-07-27T06:57:12.516718Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:57:12.516882Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:57:12.580548Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5zrf\": the object has been modified; please 
apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5zrf" level=info timestamp=2018-07-27T06:57:12.602518Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5zrf\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5zrf" level=info timestamp=2018-07-27T06:58:00.353142Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7pbkp kind= uid=66982e18-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:58:00.353287Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7pbkp kind= uid=66982e18-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:58:00.460996Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7pbkp\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7pbkp" level=info timestamp=2018-07-27T07:01:00.818348Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmir5v59 kind= uid=d229344e-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:01:00.818540Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmir5v59 kind= uid=d229344e-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:01:00.891400Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmir5v59\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmir5v59" Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T06:38:08.369671Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T06:58:00.164014Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T06:58:00.164564Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmil5zrf" level=info timestamp=2018-07-27T06:58:00.373136Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.373249Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." 
level=info timestamp=2018-07-27T06:58:00.373276Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T06:58:00.373365Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T06:58:00.373402Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.373473Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T06:58:00.373532Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:58:00.373608Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.375136Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T06:58:00.722249Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:58:00.722375Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.722516Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:58:00.722610Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T06:51:37.735981Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.736172Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.736374Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=error timestamp=2018-07-27T06:51:37.756390Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind= uid=44a0f0ff-9169-11e8-96ba-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed." 
level=info timestamp=2018-07-27T06:51:37.756598Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmifqlmx" level=info timestamp=2018-07-27T06:51:37.756947Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.757139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.766911Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.766992Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.805480Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.805731Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:38.159890Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:38.160031Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:38.160136Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:38.160199Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
Pod name: virt-launcher-testvmir5v59-58nwx Pod phase: Running
level=info timestamp=2018-07-27T07:01:05.766962Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets"
level=info timestamp=2018-07-27T07:01:05.767321Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[OnDefineDomain:[0xc4204065c0]]"
level=info timestamp=2018-07-27T07:01:05.768669Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system"
level=info timestamp=2018-07-27T07:01:15.775102Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon"
level=info timestamp=2018-07-27T07:01:15.814179Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmir5v59"
level=info timestamp=2018-07-27T07:01:15.816912Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback"
level=info timestamp=2018-07-27T07:01:15.817381Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready"
• Failure [180.460 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
VMI definition
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
with SM BIOS hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
should call Collect and OnDefineDomain on the hook sidecar [It]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:67
Timed out after 90.005s.
Timed out waiting for VMI to enter Running phase
Expected
    : false
to equal
    : true
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049
------------------------------
STEP: Getting hook-sidecar logs
level=info timestamp=2018-07-27T07:01:01.770270Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmir5v59-58nwx"
Pod name: disks-images-provider-vs2cw Pod phase: Running
copy all images to host mount directory
Pod name: disks-images-provider-xjp4h Pod phase: Running
copy all images to host mount directory
Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running
2018/07/27 07:06:25 http: TLS handshake error from 10.129.0.1:43918: EOF
level=info timestamp=2018-07-27T07:06:27.345360Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/27 07:06:35 http: TLS handshake error from 10.129.0.1:43926: EOF
level=info timestamp=2018-07-27T07:06:36.869746Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
level=info timestamp=2018-07-27T07:06:37.391168Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/27 07:06:45 http: TLS handshake error from 10.129.0.1:43934: EOF
level=info timestamp=2018-07-27T07:06:47.435590Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-27T07:06:51.461410Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info
timestamp=2018-07-27T07:06:52.361576Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:06:54.952473Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:06:55 http: TLS handshake error from 10.129.0.1:43942: EOF level=info timestamp=2018-07-27T07:06:57.480088Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:06:59.550624Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:06:59.569177Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:06:59.597811Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 07:04:49 http: TLS handshake error from 10.129.0.1:48076: EOF 2018/07/27 07:04:59 http: TLS handshake error from 10.129.0.1:48084: EOF level=info timestamp=2018-07-27T07:05:06.877459Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:05:09 http: TLS handshake error from 10.129.0.1:48092: EOF 2018/07/27 07:05:19 http: TLS handshake error from 10.129.0.1:48100: EOF 2018/07/27 07:05:29 http: TLS handshake error from 10.129.0.1:48108: EOF 2018/07/27 07:05:39 http: TLS handshake error from 10.129.0.1:48116: EOF 2018/07/27 07:05:49 http: TLS handshake error from 10.129.0.1:48124: EOF 2018/07/27 07:05:59 http: TLS handshake error from 10.129.0.1:48132: EOF 2018/07/27 07:06:09 http: TLS handshake error from 10.129.0.1:48140: EOF 2018/07/27 07:06:19 http: TLS handshake error from 10.129.0.1:48148: EOF 2018/07/27 07:06:29 http: TLS handshake error from 10.129.0.1:48156: EOF 2018/07/27 07:06:39 http: TLS handshake error from 10.129.0.1:48164: EOF 2018/07/27 07:06:49 http: TLS handshake error from 10.129.0.1:48172: EOF 2018/07/27 07:06:59 http: TLS handshake error from 10.129.0.1:48180: EOF Pod name: virt-controller-7d57d96b65-2wdgk Pod phase: Running level=info timestamp=2018-07-27T06:56:12.531940Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmipmzsh\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmipmzsh" level=info timestamp=2018-07-27T06:57:12.516718Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:57:12.516882Z pos=preset.go:165 component=virt-controller service=http 
namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:57:12.580548Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5zrf\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5zrf" level=info timestamp=2018-07-27T06:57:12.602518Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5zrf\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5zrf" level=info timestamp=2018-07-27T06:58:00.353142Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7pbkp kind= uid=66982e18-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T06:58:00.353287Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7pbkp kind= uid=66982e18-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T06:58:00.460996Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7pbkp\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7pbkp" level=info timestamp=2018-07-27T07:01:00.818348Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmir5v59 kind= uid=d229344e-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:01:00.818540Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmir5v59 kind= uid=d229344e-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:01:00.891400Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmir5v59\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmir5v59" level=info timestamp=2018-07-27T07:04:01.277065Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7kh2l kind= uid=3db8d1d7-916b-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:04:01.277249Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7kh2l kind= uid=3db8d1d7-916b-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:04:01.363187Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7kh2l\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7kh2l" level=info timestamp=2018-07-27T07:04:01.414245Z pos=vmi.go:157 
component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7kh2l\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7kh2l" Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T06:38:08.369671Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T06:58:00.164014Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T06:58:00.164564Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmil5zrf" level=info timestamp=2018-07-27T06:58:00.373136Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind= uid=4a14de45-916a-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.373249Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T06:58:00.373276Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T06:58:00.373365Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T06:58:00.373402Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.373473Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T06:58:00.373532Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:58:00.373608Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.375136Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T06:58:00.722249Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:58:00.722375Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:58:00.722516Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-27T06:58:00.722610Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmil5zrf kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T06:51:37.735981Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.736172Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.736374Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=error timestamp=2018-07-27T06:51:37.756390Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind= uid=44a0f0ff-9169-11e8-96ba-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-27T06:51:37.756598Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmifqlmx" level=info timestamp=2018-07-27T06:51:37.756947Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.757139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.766911Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.766992Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.805480Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.805731Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-27T06:51:38.159890Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-27T06:51:38.160031Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-07-27T06:51:38.160136Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-27T06:51:38.160199Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
Pod name: virt-launcher-testvmi7kh2l-kjmsh Pod phase: Running
level=info timestamp=2018-07-27T07:04:06.050863Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets"
level=info timestamp=2018-07-27T07:04:06.051258Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[OnDefineDomain:[0xc4200ba500]]"
level=info timestamp=2018-07-27T07:04:06.052663Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system"
level=info timestamp=2018-07-27T07:04:16.059833Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon"
level=info timestamp=2018-07-27T07:04:16.098239Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmi7kh2l"
level=info timestamp=2018-07-27T07:04:16.102829Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback"
level=info timestamp=2018-07-27T07:04:16.103442Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready"
• Failure [180.455 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
VMI definition
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
with SM BIOS hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
should update domain XML with SM BIOS properties [It]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:83
Timed out after 90.004s.
Timed out waiting for VMI to enter Running phase
Expected
    : false
to equal
    : true
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049
------------------------------
STEP: Reading domain XML using virsh
level=info timestamp=2018-07-27T07:04:02.233210Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmi7kh2l-kjmsh"
• [SLOW TEST:51.254 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
with a serial console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
with a cirros image
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:67
should return that we are running cirros
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:68
------------------------------
2018/07/27 03:07:53 read closing down: EOF
2018/07/27 03:08:58 read closing down: EOF
• [SLOW TEST:65.024 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
with a serial console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
with a fedora image
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:77
should return that we are running fedora
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:78
------------------------------
Pod name: disks-images-provider-vs2cw Pod phase: Running
copy all images to host mount directory
Pod name: disks-images-provider-xjp4h Pod phase: Running
copy all images to host mount directory
Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running
level=info timestamp=2018-07-27T07:09:21.741660Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90
level=info timestamp=2018-07-27T07:09:21.756103Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-27T07:09:22.670740Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=error timestamp=2018-07-27T07:09:22.818339Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request."
level=info timestamp=2018-07-27T07:09:22.818407Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90
level=error timestamp=2018-07-27T07:09:23.879428Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request."
level=info timestamp=2018-07-27T07:09:23.879515Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:24.931542Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:24.931760Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 2018/07/27 07:09:25 http: TLS handshake error from 10.129.0.1:44068: EOF level=info timestamp=2018-07-27T07:09:25.249714Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=error timestamp=2018-07-27T07:09:26.007975Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:26.008069Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:27.058456Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:27.058533Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:28.113613Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:28.113707Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running level=info timestamp=2018-07-27T07:09:06.926855Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:07.977620Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." 
level=info timestamp=2018-07-27T07:09:07.977778Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 2018/07/27 07:09:09 http: TLS handshake error from 10.129.0.1:48290: EOF level=error timestamp=2018-07-27T07:09:10.092358Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:10.092517Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:11.151899Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:11.151998Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:13.273693Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:13.273902Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:14.357971Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:14.358058Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:16.452922Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." 
level=info timestamp=2018-07-27T07:09:16.453011Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmi8b42g/console proto=HTTP/1.1 statusCode=400 contentLength=90 2018/07/27 07:09:19 http: TLS handshake error from 10.129.0.1:48298: EOF Pod name: virt-controller-7d57d96b65-2wdgk Pod phase: Running level=info timestamp=2018-07-27T07:01:00.818348Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmir5v59 kind= uid=d229344e-916a-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:01:00.818540Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmir5v59 kind= uid=d229344e-916a-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:01:00.891400Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmir5v59\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmir5v59" level=info timestamp=2018-07-27T07:04:01.277065Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7kh2l kind= uid=3db8d1d7-916b-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:04:01.277249Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7kh2l kind= uid=3db8d1d7-916b-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:04:01.363187Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7kh2l\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7kh2l" level=info timestamp=2018-07-27T07:04:01.414245Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7kh2l\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7kh2l" level=info timestamp=2018-07-27T07:07:01.568991Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7kh2l\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi7kh2l, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 3db8d1d7-916b-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7kh2l" level=info timestamp=2018-07-27T07:07:01.732543Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijfqjd kind= uid=a948049b-916b-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:07:01.732698Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijfqjd kind= uid=a948049b-916b-11e8-96ba-525500d15501 msg="Marking 
VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:07:52.992701Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:07:52.992860Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:08:58.047715Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8b42g kind= uid=ee9adcd8-916b-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:08:58.047837Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8b42g kind= uid=ee9adcd8-916b-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:08:58.136564Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8b42g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8b42g" Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T06:38:08.369671Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T07:08:19.991428Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:08:19.992975Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:08:19.996784Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:08:57.912621Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-27T07:08:57.912782Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:08:57.913184Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmip9qbx" level=info timestamp=2018-07-27T07:08:58.127812Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:08:58.127946Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." 
level=info timestamp=2018-07-27T07:08:58.127976Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:08:58.128489Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:08:58.128739Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:08:58.132183Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:08:58.132327Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:08:58.132386Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:08:58.132465Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T06:51:37.735981Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.736172Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.736374Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=error timestamp=2018-07-27T06:51:37.756390Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind= uid=44a0f0ff-9169-11e8-96ba-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-27T06:51:37.756598Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmifqlmx" level=info timestamp=2018-07-27T06:51:37.756947Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-27T06:51:37.757139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.766911Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.766992Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.805480Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.805731Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:38.159890Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:38.160031Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:38.160136Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:38.160199Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
Pod name: virt-launcher-testvmi8b42g-gkgvg Pod phase: Running
level=info timestamp=2018-07-27T07:09:01.899196Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets"
level=info timestamp=2018-07-27T07:09:01.899492Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]"
level=info timestamp=2018-07-27T07:09:01.912363Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system"
level=info timestamp=2018-07-27T07:09:11.919340Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon"
level=info timestamp=2018-07-27T07:09:11.944428Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmi8b42g"
level=info timestamp=2018-07-27T07:09:11.946828Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback"
level=info timestamp=2018-07-27T07:09:11.946996Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready"
• Failure [30.472 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
with a serial console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
should be able to reconnect to console multiple times [It]
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:87
Expected error:
    <*errors.errorString | 0xc420802010>: {
        s: "Timeout trying to connect to the virtual machine instance",
    }
    Timeout trying to connect to the virtual machine instance
not to have occurred
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:100
------------------------------
STEP: Creating a new VirtualMachineInstance
STEP: Checking that the console output equals to expected one
Pod name: disks-images-provider-vs2cw Pod phase: Running
copy all images to host mount directory
Pod name: disks-images-provider-xjp4h Pod phase: Running
copy all images to host mount directory
Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running
2018/07/27 07:09:45 http: TLS handshake error from 10.129.0.1:44084: EOF
level=error timestamp=2018-07-27T07:09:45.492573Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request."
level=info timestamp=2018-07-27T07:09:45.492660Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90
level=info timestamp=2018-07-27T07:09:48.517572Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=error timestamp=2018-07-27T07:09:48.640516Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request."
level=info timestamp=2018-07-27T07:09:48.640621Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:49.692419Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:49.692509Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:50.750770Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:50.750886Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=info timestamp=2018-07-27T07:09:51.820858Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:09:52.717692Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:09:54.007653Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:09:55 http: TLS handshake error from 10.129.0.1:44092: EOF level=info timestamp=2018-07-27T07:09:55.299368Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running level=error timestamp=2018-07-27T07:09:51.804223Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:51.804331Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:52.851612Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." 
level=info timestamp=2018-07-27T07:09:52.851696Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=info timestamp=2018-07-27T07:09:53.493892Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=error timestamp=2018-07-27T07:09:53.901163Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:53.901279Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:54.960066Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:54.960170Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:56.036196Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:56.036320Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:57.095997Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." level=info timestamp=2018-07-27T07:09:57.096150Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 level=error timestamp=2018-07-27T07:09:58.159205Z pos=subresource.go:54 component=virt-api reason="Unable to connect to VirtualMachineInstance because phase is Scheduling instead of Running" msg="Failed to gather remote exec info for subresource request." 
level=info timestamp=2018-07-27T07:09:58.159309Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmirhqvz/console proto=HTTP/1.1 statusCode=400 contentLength=90 Pod name: virt-controller-7d57d96b65-2wdgk Pod phase: Running level=info timestamp=2018-07-27T07:04:01.277065Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7kh2l kind= uid=3db8d1d7-916b-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:04:01.277249Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7kh2l kind= uid=3db8d1d7-916b-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:04:01.363187Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7kh2l\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7kh2l" level=info timestamp=2018-07-27T07:04:01.414245Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7kh2l\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7kh2l" level=info timestamp=2018-07-27T07:07:01.568991Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7kh2l\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi7kh2l, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 3db8d1d7-916b-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7kh2l" level=info timestamp=2018-07-27T07:07:01.732543Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijfqjd kind= uid=a948049b-916b-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:07:01.732698Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijfqjd kind= uid=a948049b-916b-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:07:52.992701Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:07:52.992860Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:08:58.047715Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8b42g kind= uid=ee9adcd8-916b-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:08:58.047837Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8b42g kind= 
uid=ee9adcd8-916b-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:08:58.136564Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8b42g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8b42g" level=info timestamp=2018-07-27T07:09:28.338162Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8b42g\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi8b42g, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: ee9adcd8-916b-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8b42g" level=info timestamp=2018-07-27T07:09:28.498991Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirhqvz kind= uid=00c2bb1c-916c-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:09:28.499104Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirhqvz kind= uid=00c2bb1c-916c-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T06:38:08.369671Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T07:08:19.991428Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:08:19.992975Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:08:19.996784Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:08:57.912621Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-27T07:08:57.912782Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:08:57.913184Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmip9qbx" level=info timestamp=2018-07-27T07:08:58.127812Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind= uid=c7d5ab10-916b-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-27T07:08:58.127946Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:08:58.127976Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:08:58.128489Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:08:58.128739Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:08:58.132183Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:08:58.132327Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:08:58.132386Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmip9qbx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:08:58.132465Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T06:51:37.735981Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.736172Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.736374Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind= uid=3a06576f-9169-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=error timestamp=2018-07-27T06:51:37.756390Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind= uid=44a0f0ff-9169-11e8-96ba-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed." 
level=info timestamp=2018-07-27T06:51:37.756598Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifqlmx\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifqlmx, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 44a0f0ff-9169-11e8-96ba-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmifqlmx" level=info timestamp=2018-07-27T06:51:37.756947Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.757139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.766911Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.766992Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifqlmx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:37.805480Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:37.805731Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:38.159890Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:38.160031Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T06:51:38.160136Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T06:51:38.160199Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimgw88 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
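Editor's note: the "Operation cannot be fulfilled ... the object has been modified" messages in the controller and handler logs above are ordinary optimistic-concurrency conflicts that the components resolve by re-enqueuing the VMI, while the StorageError/UID-precondition variant appears to indicate the object was already deleted (the UID in the object meta is empty). Below is a hedged sketch of the usual conflict-retry pattern using client-go's RetryOnConflict; fetchLatestVMI and updateVMIStatus are placeholder helpers, not KubeVirt functions.

package main

import (
	"fmt"

	"k8s.io/client-go/util/retry"
)

// vmi is a trimmed-down placeholder for the real VirtualMachineInstance type.
type vmi struct {
	Name   string
	Status struct{ Phase string }
}

// fetchLatestVMI and updateVMIStatus are hypothetical helpers; in real code
// they would be Get/Update calls against the apiserver, and updateVMIStatus
// would return the API error from the Update call.
func fetchLatestVMI(namespace, name string) (*vmi, error) { return &vmi{Name: name}, nil }
func updateVMIStatus(v *vmi) error                        { return nil }

// syncStatus re-fetches the newest version of the object before updating it.
// RetryOnConflict re-runs the closure only when it returns a Kubernetes
// conflict error, which is the error shape behind the "object has been
// modified" messages above.
func syncStatus(namespace, name string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		latest, err := fetchLatestVMI(namespace, name)
		if err != nil {
			return err
		}
		latest.Status.Phase = "Running"
		return updateVMIStatus(latest)
	})
}

func main() {
	if err := syncStatus("kubevirt-test-default", "testvmifqlmx"); err != nil {
		fmt.Println("status update failed:", err)
	}
}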
Pod name: virt-launcher-testvmirhqvz-8rswp Pod phase: Running level=info timestamp=2018-07-27T07:09:31.755579Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T07:09:31.756488Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T07:09:31.758043Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T07:09:41.765811Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T07:09:41.809443Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmirhqvz" level=info timestamp=2018-07-27T07:09:41.813463Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T07:09:41.813780Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" • Failure [30.438 seconds] Console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65 with a serial console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66 should wait until the virtual machine is in running state and return a stream interface [It] /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:103 Expected error: <*errors.errorString | 0xc420867fe0>: { s: "Timeout trying to connect to the virtual machine instance", } Timeout trying to connect to the virtual machine instance not to have occurred /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:109 ------------------------------ STEP: Creating a new VirtualMachineInstance • [SLOW TEST:30.220 seconds] Console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65 with a serial console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66 should fail waiting for the virtual machine instance to be running /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:111 ------------------------------ • [SLOW TEST:30.225 seconds] Console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65 with a serial console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66 should fail waiting for the expecter /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:134 ------------------------------ • [SLOW TEST:36.240 seconds] LeaderElection /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:43 Start a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:53 when the controller pod is not running /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:54 should success /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:55 ------------------------------ • ------------------------------ • [SLOW TEST:8.585 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi 
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:7.875 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given an vm /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:8.026 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi preset /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:7.962 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi replica set /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ •• ------------------------------ • [SLOW TEST:16.148 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should update VirtualMachine once VMIs are up /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:195 ------------------------------ • [SLOW TEST:10.299 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should remove VirtualMachineInstance once the VMI is marked for deletion /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:204 ------------------------------ • ------------------------------ • [SLOW TEST:47.528 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if it gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:245 ------------------------------ • [SLOW TEST:37.458 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:265 ------------------------------ • [SLOW TEST:42.487 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should stop VirtualMachineInstance if running set to false /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:325 ------------------------------ • [SLOW TEST:189.541 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should 
start and stop VirtualMachineInstance multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:333 ------------------------------ • [SLOW TEST:52.455 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should not update the VirtualMachineInstance spec if Running /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:346 ------------------------------ • [SLOW TEST:177.608 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should survive guest shutdown, multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:387 ------------------------------ 2018/07/27 03:21:44 read closing down: EOF 2018/07/27 03:21:44 read closing down: EOF 2018/07/27 03:21:44 read closing down: EOF VM testvmixzsjm was scheduled to start • [SLOW TEST:16.384 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should start a VirtualMachineInstance once /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:436 ------------------------------ VM testvminkvql was scheduled to stop • [SLOW TEST:24.379 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should stop a VirtualMachineInstance once /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:467 ------------------------------ • ------------------------------ • [SLOW TEST:6.959 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should scale /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 to five, to six and then to zero replicas /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ •• ------------------------------ • [SLOW TEST:18.531 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should update readyReplicas once VMIs are up /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:157 ------------------------------ • [SLOW TEST:6.633 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should remove VMIs once it is marked for deletion /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:169 ------------------------------ • ------------------------------ • [SLOW TEST:5.709 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should not scale when paused and scale when resume /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:223 ------------------------------ • [SLOW TEST:5.490 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should remove the finished VM /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:279 ------------------------------ ••••2018/07/27 03:24:15 read closing down: EOF 2018/07/27 03:25:07 read closing down: EOF 2018/07/27 03:25:09 read closing down: EOF ------------------------------ • [SLOW TEST:105.308 seconds] Slirp 
/root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:39 should be able to /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 VirtualMachineInstance with slirp interface /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ 2018/07/27 03:25:11 read closing down: EOF • ------------------------------ • [SLOW TEST:17.230 seconds] VNC /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:54 with VNC connection /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:62 should allow accessing the VNC device /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:64 ------------------------------ ••2018/07/27 03:26:20 read closing down: EOF Service cluster-ip-vmi successfully exposed for virtualmachineinstance testvmi85lj5 ------------------------------ • [SLOW TEST:55.415 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:68 Should expose a Cluster IP service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:71 ------------------------------ Service cluster-ip-target-vmi successfully exposed for virtualmachineinstance testvmi85lj5 •Service node-port-vmi successfully exposed for virtualmachineinstance testvmi85lj5 ------------------------------ • [SLOW TEST:9.188 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61 Expose NodePort service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:124 Should expose a NodePort service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:129 ------------------------------ 2018/07/27 03:27:27 read closing down: EOF Service cluster-ip-udp-vmi successfully exposed for virtualmachineinstance testvmimkxb2 • [SLOW TEST:56.516 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose UDP service on a VMI /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:166 Expose ClusterIP UDP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:173 Should expose a ClusterIP service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:177 ------------------------------ Service node-port-udp-vmi successfully exposed for virtualmachineinstance testvmimkxb2 • [SLOW TEST:10.247 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose UDP service on a VMI /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:166 Expose NodePort UDP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:205 Should expose a NodePort service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:210 ------------------------------ 2018/07/27 03:28:47 read closing down: EOF 2018/07/27 03:28:58 read closing down: EOF Service cluster-ip-vmirs successfully exposed for vmirs replicasettdmp9 • [SLOW TEST:80.815 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VMI replica set /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:253 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:286 Should create a ClusterIP service on VMRS and connect to it 
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:290 ------------------------------ Service cluster-ip-vm successfully exposed for virtualmachine testvmibvd8t VM testvmibvd8t was scheduled to start 2018/07/27 03:30:01 read closing down: EOF • [SLOW TEST:62.605 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on an VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:318 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:362 Connect to ClusterIP services that was set when VM was offline /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:363 ------------------------------ • [SLOW TEST:85.741 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting and stopping the same VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:90 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:91 should success multiple times /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:92 ------------------------------ • [SLOW TEST:17.192 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:111 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:112 should not modify the spec on status update /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:113 ------------------------------ Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running 2018/07/27 07:32:55 http: TLS handshake error from 10.129.0.1:45260: EOF level=info timestamp=2018-07-27T07:32:57.938729Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:32:57.939682Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:32:59.946365Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:33:03.147998Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:33:03.971557Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:33:03.995983Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:33:04.008270Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 
07:33:05 http: TLS handshake error from 10.129.0.1:45268: EOF level=info timestamp=2018-07-27T07:33:13.195641Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:33:15 http: TLS handshake error from 10.129.0.1:45276: EOF level=info timestamp=2018-07-27T07:33:21.260391Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T07:33:21.264041Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T07:33:23.247973Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:33:25 http: TLS handshake error from 10.129.0.1:45284: EOF Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 07:31:19 http: TLS handshake error from 10.129.0.1:49418: EOF 2018/07/27 07:31:29 http: TLS handshake error from 10.129.0.1:49428: EOF level=info timestamp=2018-07-27T07:31:36.897344Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:31:39 http: TLS handshake error from 10.129.0.1:49436: EOF 2018/07/27 07:31:49 http: TLS handshake error from 10.129.0.1:49444: EOF 2018/07/27 07:31:59 http: TLS handshake error from 10.129.0.1:49452: EOF 2018/07/27 07:32:09 http: TLS handshake error from 10.129.0.1:49460: EOF 2018/07/27 07:32:19 http: TLS handshake error from 10.129.0.1:49468: EOF 2018/07/27 07:32:29 http: TLS handshake error from 10.129.0.1:49476: EOF 2018/07/27 07:32:39 http: TLS handshake error from 10.129.0.1:49484: EOF 2018/07/27 07:32:49 http: TLS handshake error from 10.129.0.1:49492: EOF 2018/07/27 07:32:59 http: TLS handshake error from 10.129.0.1:49500: EOF level=info timestamp=2018-07-27T07:33:06.899413Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:33:09 http: TLS handshake error from 10.129.0.1:49508: EOF 2018/07/27 07:33:19 http: TLS handshake error from 10.129.0.1:49516: EOF Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T07:31:30.649019Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil6cwr kind= uid=14847038-916f-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:31:30.649138Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil6cwr kind= uid=14847038-916f-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:31:30.757481Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil6cwr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil6cwr" level=info timestamp=2018-07-27T07:31:47.995724Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8pgsn kind= 
uid=1edb592e-916f-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:31:47.996220Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8pgsn kind= uid=1edb592e-916f-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:31:48.013596Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvminmbqd kind= uid=1edcfdf0-916f-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:31:48.014019Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvminmbqd kind= uid=1edcfdf0-916f-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:31:48.031091Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:31:48.032722Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:31:48.056582Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi55hrd kind= uid=1ee15cdc-916f-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:31:48.056699Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi55hrd kind= uid=1ee15cdc-916f-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:31:48.084822Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:31:48.084922Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:31:48.192518Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvminmbqd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvminmbqd" level=info timestamp=2018-07-27T07:31:48.194431Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8pgsn\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8pgsn" Pod name: virt-controller-7d57d96b65-rhwrp Pod phase: Running level=info timestamp=2018-07-27T07:11:02.455094Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=error timestamp=2018-07-27T07:32:25.929940Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 reason="Operation cannot be 
fulfilled on virtualmachineinstances.kubevirt.io \"testvmislzq7\": the object has been modified; please apply your changes to the latest version and try again" msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-27T07:32:25.930415Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmislzq7\": the object has been modified; please apply your changes to the latest version and try again" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmislzq7" level=info timestamp=2018-07-27T07:32:25.930927Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:32:25.936761Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:32:25.936973Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="No update processing required" level=info timestamp=2018-07-27T07:32:25.937544Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:32:25.938021Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:32:25.940719Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=error timestamp=2018-07-27T07:32:25.951962Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifr8n7\": the object has been modified; please apply your changes to the latest version and try again" msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-27T07:32:25.952074Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifr8n7\": the object has been modified; please apply your changes to the latest version and try again" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmifr8n7" level=info timestamp=2018-07-27T07:32:25.952190Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:32:26.008868Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-27T07:32:26.010449Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-27T07:32:26.017472Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:32:26.029926Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T07:22:57.730664Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.733025Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmit855lrzfrx" level=info timestamp=2018-07-27T07:22:57.947399Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:57.946489Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.949021Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:22:57.949147Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.949281Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.950315Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:58.004835Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:22:58.005051Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.005139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.654854Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655110Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.655213Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-27T07:22:58.655362Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmi55hrd-997cz Pod phase: Running level=info timestamp=2018-07-27T07:31:53.236862Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T07:31:53.237206Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T07:31:53.239363Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T07:32:03.249092Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T07:32:03.300407Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmi55hrd" level=info timestamp=2018-07-27T07:32:03.302244Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T07:32:03.302401Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" Pod name: virt-launcher-testvmi8pgsn-g5hcd Pod phase: Running level=info timestamp=2018-07-27T07:32:14.954714Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-27T07:32:14.960358Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 19e34113-7ad6-45f0-b511-7a84eb2a04fc" level=info timestamp=2018-07-27T07:32:14.961738Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-27T07:32:15.085237Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:15.315213Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-27T07:32:15.355731Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-27T07:32:15.356153Z pos=manager.go:196 component=virt-launcher namespace=kubevirt-test-default name=testvmi8pgsn kind= uid=1edb592e-916f-11e8-96ba-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-27T07:32:15.359184Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi8pgsn kind= uid=1edb592e-916f-11e8-96ba-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-27T07:32:15.454905Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:15.455069Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-27T07:32:15.483549Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-27T07:32:15.906916Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi8pgsn kind= uid=1edb592e-916f-11e8-96ba-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-27T07:32:15.911805Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:15.915054Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi8pgsn kind= uid=1edb592e-916f-11e8-96ba-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-27T07:32:15.976558Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 19e34113-7ad6-45f0-b511-7a84eb2a04fc: 184" Pod name: virt-launcher-testvmifr8n7-g9gkh Pod phase: Running level=info timestamp=2018-07-27T07:32:20.497739Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-27T07:32:20.559699Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 57162157-70f4-4090-bcb3-6e2720e24203" level=info timestamp=2018-07-27T07:32:20.562685Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-27T07:32:21.227675Z pos=manager.go:196 component=virt-launcher namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Domain started." 
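Editor's note: the "Monitoring loop: rate 1s start timeout 5m0s" and "Found PID for <uuid>" lines show virt-launcher polling for the qemu process of a freshly started domain. The loop below is a rough illustration under those same parameters; findPidForDomain is a hypothetical stand-in for the real lookup in virt-launcher's monitor.go.

package main

import (
	"errors"
	"fmt"
	"time"
)

// findPidForDomain is assumed to return the qemu process PID for the given
// domain UUID, or 0 if the process has not appeared yet.
func findPidForDomain(uuid string) int { return 0 }

// waitForDomainPid polls at the given rate until a PID shows up or the start
// timeout expires, mirroring the "rate 1s start timeout 5m0s" settings logged
// above.
func waitForDomainPid(uuid string, rate, startTimeout time.Duration) (int, error) {
	deadline := time.Now().Add(startTimeout)
	ticker := time.NewTicker(rate)
	defer ticker.Stop()
	for {
		if pid := findPidForDomain(uuid); pid != 0 {
			return pid, nil
		}
		if time.Now().After(deadline) {
			return 0, errors.New("domain process did not appear before the start timeout")
		}
		<-ticker.C
	}
}

func main() {
	pid, err := waitForDomainPid("19e34113-7ad6-45f0-b511-7a84eb2a04fc", time.Second, 5*time.Minute)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("Found PID for domain: %d\n", pid)
}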
level=info timestamp=2018-07-27T07:32:21.229391Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-27T07:32:21.334668Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:21.334944Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-27T07:32:21.398331Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-27T07:32:21.567380Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 57162157-70f4-4090-bcb3-6e2720e24203: 187" level=info timestamp=2018-07-27T07:32:25.920486Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:25.920818Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-27T07:32:25.981283Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-27T07:32:25.982369Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-27T07:32:26.017195Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:26.028722Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmifr8n7 kind= uid=1ee6b92e-916f-11e8-96ba-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvminmbqd-srgpf Pod phase: Running level=info timestamp=2018-07-27T07:32:13.924346Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-27T07:32:14.842096Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-27T07:32:14.860550Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 1bc32239-7d63-4473-bd60-f5e5b3203c6f" level=info timestamp=2018-07-27T07:32:14.861014Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-27T07:32:14.950474Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:15.345276Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-27T07:32:15.468595Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-27T07:32:15.485083Z pos=manager.go:196 component=virt-launcher namespace=kubevirt-test-default name=testvminmbqd kind= uid=1edcfdf0-916f-11e8-96ba-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-27T07:32:15.486390Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvminmbqd kind= uid=1edcfdf0-916f-11e8-96ba-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-27T07:32:15.773008Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:15.773178Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-27T07:32:15.804761Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-27T07:32:15.864658Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 1bc32239-7d63-4473-bd60-f5e5b3203c6f: 180" level=info timestamp=2018-07-27T07:32:15.900181Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:15.945737Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvminmbqd kind= uid=1edcfdf0-916f-11e8-96ba-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmislzq7-42ppk Pod phase: Running level=info timestamp=2018-07-27T07:32:19.908508Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-27T07:32:19.986607Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 8a1ac507-782c-4648-8155-6084b203787a" level=info timestamp=2018-07-27T07:32:19.987894Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-27T07:32:20.106647Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:20.859211Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-27T07:32:20.954921Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-27T07:32:20.972236Z pos=manager.go:196 component=virt-launcher namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-27T07:32:20.975872Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-27T07:32:20.990812Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 8a1ac507-782c-4648-8155-6084b203787a: 184" level=info timestamp=2018-07-27T07:32:24.840043Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:24.840225Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-27T07:32:24.868001Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-27T07:32:25.927577Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-27T07:32:25.936184Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-27T07:32:25.940415Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmislzq7 kind= uid=1ede9a76-916f-11e8-96ba-525500d15501 msg="Synced vmi" • Failure [99.755 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting multiple VMIs /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:129 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:130 should success [It] /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:131 Timed out after 30.006s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 ------------------------------ STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance level=info timestamp=2018-07-27T07:31:48.578899Z pos=utils.go:254 component=tests msg="Created virtual machine pod virt-launcher-testvmi8pgsn-g5hcd" level=info timestamp=2018-07-27T07:32:14.444074Z pos=utils.go:254 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmi8pgsn-g5hcd" level=info timestamp=2018-07-27T07:32:16.344436Z pos=utils.go:254 component=tests msg="VirtualMachineInstance defined." level=info timestamp=2018-07-27T07:32:16.796760Z pos=utils.go:254 component=tests msg="VirtualMachineInstance started." STEP: Checking the number of VirtualMachineInstance disks level=info timestamp=2018-07-27T07:32:16.891167Z pos=utils.go:254 component=tests msg="Created virtual machine pod virt-launcher-testvminmbqd-srgpf" level=info timestamp=2018-07-27T07:32:16.891258Z pos=utils.go:254 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvminmbqd-srgpf" level=info timestamp=2018-07-27T07:32:16.891497Z pos=utils.go:254 component=tests msg="VirtualMachineInstance defined." level=info timestamp=2018-07-27T07:32:18.771490Z pos=utils.go:254 component=tests msg="VirtualMachineInstance started." 
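Editor's note: the failure reported above ("Timed out after 30.006s ... Expected false to equal true" from tests/utils.go:1049) has the standard Gomega timeout shape: a boolean condition polled for 30 seconds that never became true. The real suite uses Ginkgo table specs; the plain go-test sketch below only reproduces the assertion shape, with vmiIsRunning as a hypothetical helper rather than the real check in utils.go.

package tests

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// vmiIsRunning is a hypothetical helper assumed to query the cluster and
// report whether the VMI has reached the Running phase. With this placeholder
// always returning false, running the test reproduces the same timeout
// failure as the log above.
func vmiIsRunning(namespace, name string) bool { return false }

func TestVMIEventuallyRunning(t *testing.T) {
	g := gomega.NewWithT(t)
	// Poll once per second for up to 30 seconds; if the condition never
	// becomes true, Gomega fails with "Timed out after 30.xxxs ...
	// Expected <bool>: false to equal <bool>: true".
	g.Eventually(func() bool {
		return vmiIsRunning("kubevirt-test-default", "testvmi55hrd")
	}, 30*time.Second, 1*time.Second).Should(gomega.Equal(true),
		"Timed out waiting for VMI to enter Running phase")
}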
STEP: Checking the number of VirtualMachineInstance disks level=info timestamp=2018-07-27T07:32:18.863681Z pos=utils.go:254 component=tests msg="Created virtual machine pod virt-launcher-testvmislzq7-42ppk" level=info timestamp=2018-07-27T07:32:18.863724Z pos=utils.go:254 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmislzq7-42ppk" level=info timestamp=2018-07-27T07:32:26.805245Z pos=utils.go:254 component=tests msg="VirtualMachineInstance defined." level=info timestamp=2018-07-27T07:32:26.820740Z pos=utils.go:254 component=tests msg="VirtualMachineInstance started." STEP: Checking the number of VirtualMachineInstance disks level=info timestamp=2018-07-27T07:32:26.900583Z pos=utils.go:254 component=tests msg="Created virtual machine pod virt-launcher-testvmi55hrd-997cz" • [SLOW TEST:8.127 seconds] Subresource Api /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37 Rbac Authorization /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:48 with correct permissions /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:51 should be allowed to access subresource endpoint /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:52 ------------------------------ ••• ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to start a vmi [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:133 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.002 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to stop a running vmi [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:139 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150 should have correct UUID /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:192 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.030 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150 should have pod IP /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:208 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.005 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226 should succeed to start a vmi /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:242 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.004 seconds] Windows VirtualMachineInstance 
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226 should succeed to stop a vmi /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:250 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ • [SLOW TEST:50.494 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ 2018/07/27 03:34:37 read closing down: EOF 2018/07/27 03:35:26 read closing down: EOF • [SLOW TEST:48.703 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ 2018/07/27 03:37:24 read closing down: EOF • [SLOW TEST:123.088 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ 2018/07/27 03:39:43 read closing down: EOF • [SLOW TEST:145.286 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:52.480 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With an emptyDisk defined /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:113 should create a writeable emptyDisk with the right capacity /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:115 ------------------------------ 2018/07/27 03:40:47 read closing down: EOF • [SLOW TEST:52.509 seconds] 2018/07/27 03:41:39 read closing down: EOF Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With an emptyDisk defined and a specified serial number /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:163 
should create a writeable emptyDisk with the specified serial number /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:165 ------------------------------ • [SLOW TEST:50.944 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should be successfully started /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:207 ------------------------------ 2018/07/27 03:42:30 read closing down: EOF • [SLOW TEST:113.307 seconds] 2018/07/27 03:44:23 read closing down: EOF 2018/07/27 03:44:23 read closing down: EOF Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should not persist data /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:218 ------------------------------ 2018/07/27 03:47:04 read closing down: EOF • [SLOW TEST:160.686 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With VirtualMachineInstance with two PVCs /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:266 should start vmi multiple times /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:278 ------------------------------ • Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running level=info timestamp=2018-07-27T07:49:31.108325Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:49:31.897699Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:49:32.125192Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:49:32.143693Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:49:32.162381Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:49:35 http: TLS handshake error from 10.129.0.1:46100: EOF level=info timestamp=2018-07-27T07:49:36.916192Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T07:49:38.431010Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:49:45 http: TLS handshake error from 10.129.0.1:46108: EOF 
level=info timestamp=2018-07-27T07:49:48.474711Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:49:55 http: TLS handshake error from 10.129.0.1:46116: EOF level=info timestamp=2018-07-27T07:49:58.523610Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:50:01.198052Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:50:01.199986Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:50:01.950866Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 07:48:19 http: TLS handshake error from 10.129.0.1:50274: EOF 2018/07/27 07:48:29 http: TLS handshake error from 10.129.0.1:50282: EOF level=info timestamp=2018-07-27T07:48:36.905398Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:48:39 http: TLS handshake error from 10.129.0.1:50290: EOF 2018/07/27 07:48:49 http: TLS handshake error from 10.129.0.1:50298: EOF 2018/07/27 07:48:59 http: TLS handshake error from 10.129.0.1:50306: EOF level=info timestamp=2018-07-27T07:49:06.891258Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:49:09 http: TLS handshake error from 10.129.0.1:50314: EOF 2018/07/27 07:49:19 http: TLS handshake error from 10.129.0.1:50322: EOF 2018/07/27 07:49:29 http: TLS handshake error from 10.129.0.1:50330: EOF 2018/07/27 07:49:39 http: TLS handshake error from 10.129.0.1:50340: EOF 2018/07/27 07:49:49 http: TLS handshake error from 10.129.0.1:50348: EOF level=info timestamp=2018-07-27T07:49:53.474411Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T07:49:54.000681Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:49:59 http: TLS handshake error from 10.129.0.1:50356: EOF Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T07:43:34.234685Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6ztzq kind= uid=c3cecc96-9170-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:43:34.236063Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6ztzq kind= uid=c3cecc96-9170-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:44:23.768461Z pos=preset.go:139 component=virt-controller service=http 
namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=e153aec0-9170-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:44:23.769007Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=e153aec0-9170-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:45:24.911730Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=05c566e5-9171-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:45:24.911887Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=05c566e5-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:45:25.014999Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi6k6wk\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi6k6wk" level=info timestamp=2018-07-27T07:46:04.458253Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:46:04.460737Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:47:04.262291Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5pd9d kind= uid=40fea901-9171-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:47:04.262696Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5pd9d kind= uid=40fea901-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:47:04.356815Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi5pd9d\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi5pd9d, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 40fea901-9171-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi5pd9d" level=info timestamp=2018-07-27T07:47:04.507959Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil5jjr kind= uid=4123934e-9171-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:47:04.508322Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil5jjr kind= uid=4123934e-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:47:04.612102Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5jjr\": the object has been modified; please apply your 
changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5jjr" Pod name: virt-controller-7d57d96b65-rhwrp Pod phase: Running level=info timestamp=2018-07-27T07:11:02.455094Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T07:46:18.910838Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:46:18.914592Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.597229Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-27T07:46:53.597324Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:46:53.597655Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmi6k6wk" level=info timestamp=2018-07-27T07:46:53.806366Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.807694Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:46:53.807809Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:46:53.807867Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-27T07:46:53.807890Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:46:53.825667Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.829402Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:46:53.829509Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:47:03.624525Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-27T07:47:03.624649Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T07:22:57.730664Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.733025Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmit855lrzfrx" level=info timestamp=2018-07-27T07:22:57.947399Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:57.946489Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.949021Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:22:57.949147Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.949281Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.950315Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:58.004835Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:22:58.005051Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.005139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.654854Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655110Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.655213Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655362Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
Pod name: virt-launcher-testvmil5jjr-hswlp Pod phase: Running level=info timestamp=2018-07-27T07:47:08.141917Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T07:47:08.143560Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T07:47:08.146267Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T07:47:18.155530Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T07:47:18.199755Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmil5jjr" level=info timestamp=2018-07-27T07:47:18.202651Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T07:47:18.203091Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" ------------------------------ • Failure [180.493 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 should start it [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:76 Timed out after 90.004s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 ------------------------------ level=info timestamp=2018-07-27T07:47:05.022622Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmil5jjr-hswlp" Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running level=info timestamp=2018-07-27T07:52:31.661744Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:52:32.284671Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:52:32.689577Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:52:32.706509Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:52:32.720530Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:52:35 http: TLS handshake error from 10.129.0.1:46246: EOF level=info timestamp=2018-07-27T07:52:36.927018Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T07:52:39.520269Z pos=filter.go:46 
component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:52:45 http: TLS handshake error from 10.129.0.1:46254: EOF level=info timestamp=2018-07-27T07:52:49.564685Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:52:55 http: TLS handshake error from 10.129.0.1:46262: EOF level=info timestamp=2018-07-27T07:52:59.657068Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:53:01.757283Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:53:01.757963Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:53:02.344315Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 07:50:59 http: TLS handshake error from 10.129.0.1:50404: EOF 2018/07/27 07:51:09 http: TLS handshake error from 10.129.0.1:50412: EOF 2018/07/27 07:51:19 http: TLS handshake error from 10.129.0.1:50420: EOF 2018/07/27 07:51:29 http: TLS handshake error from 10.129.0.1:50428: EOF level=info timestamp=2018-07-27T07:51:36.912217Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:51:39 http: TLS handshake error from 10.129.0.1:50436: EOF 2018/07/27 07:51:49 http: TLS handshake error from 10.129.0.1:50444: EOF 2018/07/27 07:51:59 http: TLS handshake error from 10.129.0.1:50452: EOF level=info timestamp=2018-07-27T07:52:06.913605Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:52:09 http: TLS handshake error from 10.129.0.1:50460: EOF 2018/07/27 07:52:19 http: TLS handshake error from 10.129.0.1:50468: EOF 2018/07/27 07:52:29 http: TLS handshake error from 10.129.0.1:50476: EOF 2018/07/27 07:52:39 http: TLS handshake error from 10.129.0.1:50486: EOF 2018/07/27 07:52:49 http: TLS handshake error from 10.129.0.1:50494: EOF 2018/07/27 07:52:59 http: TLS handshake error from 10.129.0.1:50502: EOF Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T07:45:24.911887Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=05c566e5-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:45:25.014999Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi6k6wk\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance 
kubevirt-test-default/testvmi6k6wk" level=info timestamp=2018-07-27T07:46:04.458253Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:46:04.460737Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:47:04.262291Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5pd9d kind= uid=40fea901-9171-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:47:04.262696Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5pd9d kind= uid=40fea901-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:47:04.356815Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi5pd9d\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi5pd9d, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 40fea901-9171-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi5pd9d" level=info timestamp=2018-07-27T07:47:04.507959Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil5jjr kind= uid=4123934e-9171-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:47:04.508322Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil5jjr kind= uid=4123934e-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:47:04.612102Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5jjr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5jjr" level=info timestamp=2018-07-27T07:50:04.828821Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5jjr\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmil5jjr, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4123934e-9171-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5jjr" level=info timestamp=2018-07-27T07:50:04.998475Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmikldq2 kind= uid=acb87094-9171-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:50:04.998662Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmikldq2 kind= uid=acb87094-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:50:05.115046Z 
pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmikldq2\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmikldq2" level=info timestamp=2018-07-27T07:50:05.140939Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmikldq2\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmikldq2" Pod name: virt-controller-7d57d96b65-rhwrp Pod phase: Running level=info timestamp=2018-07-27T07:11:02.455094Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T07:46:18.910838Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:46:18.914592Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.597229Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-27T07:46:53.597324Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:46:53.597655Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmi6k6wk" level=info timestamp=2018-07-27T07:46:53.806366Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.807694Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:46:53.807809Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:46:53.807867Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-27T07:46:53.807890Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:46:53.825667Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-27T07:46:53.829402Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:46:53.829509Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:47:03.624525Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:47:03.624649Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T07:22:57.730664Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.733025Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmit855lrzfrx" level=info timestamp=2018-07-27T07:22:57.947399Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:57.946489Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.949021Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:22:57.949147Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.949281Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.950315Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:58.004835Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:22:58.005051Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.005139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.654854Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-27T07:22:58.655110Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.655213Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655362Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmikldq2-lnc6l Pod phase: Running level=info timestamp=2018-07-27T07:50:08.049715Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T07:50:08.051241Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T07:50:08.053803Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T07:50:18.063644Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T07:50:18.098184Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmikldq2" level=info timestamp=2018-07-27T07:50:18.100302Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T07:50:18.100539Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" • Failure [180.479 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 should attach virt-launcher to it [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:82 Timed out after 90.004s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 ------------------------------ level=info timestamp=2018-07-27T07:50:05.544314Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmikldq2-lnc6l" •••• Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running level=info timestamp=2018-07-27T07:55:32.208276Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:55:32.653166Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:55:33.309087Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:55:33.330387Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:55:33.344882Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:55:35 http: TLS handshake error from 10.129.0.1:46392: EOF level=info timestamp=2018-07-27T07:55:40.515642Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:55:45 http: TLS handshake error from 10.129.0.1:46400: EOF level=info timestamp=2018-07-27T07:55:50.598865Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:55:55 http: TLS handshake error from 10.129.0.1:46408: EOF level=info timestamp=2018-07-27T07:56:00.638226Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:56:02.315642Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:56:02.316050Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:56:02.719738Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:56:05 http: TLS handshake error from 10.129.0.1:46416: EOF Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 07:53:59 
http: TLS handshake error from 10.129.0.1:50550: EOF level=info timestamp=2018-07-27T07:54:06.908660Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:54:09 http: TLS handshake error from 10.129.0.1:50558: EOF 2018/07/27 07:54:19 http: TLS handshake error from 10.129.0.1:50566: EOF 2018/07/27 07:54:29 http: TLS handshake error from 10.129.0.1:50574: EOF 2018/07/27 07:54:39 http: TLS handshake error from 10.129.0.1:50582: EOF 2018/07/27 07:54:49 http: TLS handshake error from 10.129.0.1:50590: EOF 2018/07/27 07:54:59 http: TLS handshake error from 10.129.0.1:50598: EOF 2018/07/27 07:55:09 http: TLS handshake error from 10.129.0.1:50606: EOF 2018/07/27 07:55:19 http: TLS handshake error from 10.129.0.1:50614: EOF 2018/07/27 07:55:29 http: TLS handshake error from 10.129.0.1:50622: EOF level=info timestamp=2018-07-27T07:55:36.958183Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:55:39 http: TLS handshake error from 10.129.0.1:50632: EOF 2018/07/27 07:55:49 http: TLS handshake error from 10.129.0.1:50640: EOF 2018/07/27 07:55:59 http: TLS handshake error from 10.129.0.1:50648: EOF Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T07:50:04.828821Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5jjr\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmil5jjr, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4123934e-9171-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5jjr" level=info timestamp=2018-07-27T07:50:04.998475Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmikldq2 kind= uid=acb87094-9171-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:50:04.998662Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmikldq2 kind= uid=acb87094-9171-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:50:05.115046Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmikldq2\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmikldq2" level=info timestamp=2018-07-27T07:50:05.140939Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmikldq2\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmikldq2" level=info timestamp=2018-07-27T07:53:05.623035Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi68b2s kind= uid=1861af9b-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:53:05.623901Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi68b2s kind= 
uid=1861af9b-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:53:05.678778Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi68b2s\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi68b2s" level=info timestamp=2018-07-27T07:53:05.705470Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi68b2s\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi68b2s" level=info timestamp=2018-07-27T07:53:05.724422Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi68b2s\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi68b2s, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 1861af9b-9172-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi68b2s" level=info timestamp=2018-07-27T07:53:05.879915Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiltvqc kind= uid=1888ba86-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:53:05.880092Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiltvqc kind= uid=1888ba86-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:53:05.999684Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiltvqc\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiltvqc, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 1888ba86-9172-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiltvqc" level=info timestamp=2018-07-27T07:53:06.176693Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqc2st kind= uid=18b60a24-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:53:06.177903Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqc2st kind= uid=18b60a24-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-controller-7d57d96b65-rhwrp Pod phase: Running level=info timestamp=2018-07-27T07:11:02.455094Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T07:46:18.910838Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:46:18.914592Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= 
uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.597229Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-27T07:46:53.597324Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:46:53.597655Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmi6k6wk" level=info timestamp=2018-07-27T07:46:53.806366Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.807694Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:46:53.807809Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:46:53.807867Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-27T07:46:53.807890Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:46:53.825667Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.829402Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:46:53.829509Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:47:03.624525Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:47:03.624649Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T07:22:57.730664Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Processing shutdown." 
level=info timestamp=2018-07-27T07:22:57.733025Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmit855lrzfrx" level=info timestamp=2018-07-27T07:22:57.947399Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:57.946489Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.949021Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:22:57.949147Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.949281Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.950315Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:58.004835Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:22:58.005051Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.005139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.654854Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655110Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.655213Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655362Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
Pod name: virt-launcher-testvmiqc2st-t4h2b Pod phase: Running level=info timestamp=2018-07-27T07:53:11.745874Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T07:53:11.746203Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T07:53:11.748236Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T07:53:21.760990Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T07:53:21.794178Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmiqc2st" level=info timestamp=2018-07-27T07:53:21.797465Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T07:53:21.797707Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" ------------------------------ • Failure [180.481 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with boot order /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170 should be able to boot from selected disk /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 Alpine as first boot [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.004s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 ------------------------------ STEP: defining a VirtualMachineInstance with an Alpine disk STEP: adding a Cirros Disk STEP: setting boot order STEP: starting VirtualMachineInstance STEP: Waiting the VirtualMachineInstance start level=info timestamp=2018-07-27T07:53:06.777977Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmiqc2st-t4h2b" Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running level=info timestamp=2018-07-27T07:58:32.967924Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:58:33.948545Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:58:33.963441Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:58:33.974420Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:58:35 http: TLS handshake error from 10.129.0.1:46538: EOF level=info timestamp=2018-07-27T07:58:36.878923Z 
pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T07:58:41.453471Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:58:45 http: TLS handshake error from 10.129.0.1:46546: EOF level=info timestamp=2018-07-27T07:58:51.502531Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:58:55 http: TLS handshake error from 10.129.0.1:46554: EOF level=info timestamp=2018-07-27T07:59:01.550082Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:59:02.860735Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:59:02.861281Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T07:59:03.024490Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 07:59:05 http: TLS handshake error from 10.129.0.1:46562: EOF Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 07:56:59 http: TLS handshake error from 10.129.0.1:50696: EOF 2018/07/27 07:57:09 http: TLS handshake error from 10.129.0.1:50704: EOF 2018/07/27 07:57:19 http: TLS handshake error from 10.129.0.1:50712: EOF 2018/07/27 07:57:29 http: TLS handshake error from 10.129.0.1:50720: EOF level=info timestamp=2018-07-27T07:57:36.932540Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:57:39 http: TLS handshake error from 10.129.0.1:50728: EOF 2018/07/27 07:57:49 http: TLS handshake error from 10.129.0.1:50736: EOF 2018/07/27 07:57:59 http: TLS handshake error from 10.129.0.1:50744: EOF level=info timestamp=2018-07-27T07:58:06.886609Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 07:58:09 http: TLS handshake error from 10.129.0.1:50752: EOF 2018/07/27 07:58:19 http: TLS handshake error from 10.129.0.1:50760: EOF 2018/07/27 07:58:29 http: TLS handshake error from 10.129.0.1:50768: EOF 2018/07/27 07:58:39 http: TLS handshake error from 10.129.0.1:50778: EOF 2018/07/27 07:58:49 http: TLS handshake error from 10.129.0.1:50786: EOF 2018/07/27 07:58:59 http: TLS handshake error from 10.129.0.1:50794: EOF Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T07:50:05.115046Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmikldq2\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance 
kubevirt-test-default/testvmikldq2" level=info timestamp=2018-07-27T07:50:05.140939Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmikldq2\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmikldq2" level=info timestamp=2018-07-27T07:53:05.623035Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi68b2s kind= uid=1861af9b-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:53:05.623901Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi68b2s kind= uid=1861af9b-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:53:05.678778Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi68b2s\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi68b2s" level=info timestamp=2018-07-27T07:53:05.705470Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi68b2s\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi68b2s" level=info timestamp=2018-07-27T07:53:05.724422Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi68b2s\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi68b2s, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 1861af9b-9172-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi68b2s" level=info timestamp=2018-07-27T07:53:05.879915Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiltvqc kind= uid=1888ba86-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:53:05.880092Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiltvqc kind= uid=1888ba86-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:53:05.999684Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiltvqc\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiltvqc, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 1888ba86-9172-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiltvqc" level=info timestamp=2018-07-27T07:53:06.176693Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqc2st kind= uid=18b60a24-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:53:06.177903Z pos=preset.go:165 
component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqc2st kind= uid=18b60a24-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:56:06.508738Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqc2st\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqc2st, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 18b60a24-9172-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqc2st" level=info timestamp=2018-07-27T07:56:06.660630Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibcmd8 kind= uid=8448ab57-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:56:06.660766Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibcmd8 kind= uid=8448ab57-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-controller-7d57d96b65-rhwrp Pod phase: Running level=info timestamp=2018-07-27T07:11:02.455094Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T07:46:18.910838Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-27T07:46:18.914592Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.597229Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-27T07:46:53.597324Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:46:53.597655Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmi6k6wk" level=info timestamp=2018-07-27T07:46:53.806366Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.807694Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:46:53.807809Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:46:53.807867Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." 
level=info timestamp=2018-07-27T07:46:53.807890Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:46:53.825667Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:46:53.829402Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:46:53.829509Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind= uid=1d5940fe-9171-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:47:03.624525Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:47:03.624649Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi6k6wk kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T07:22:57.730664Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.733025Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmit855lrzfrx" level=info timestamp=2018-07-27T07:22:57.947399Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:57.946489Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.949021Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:22:57.949147Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.949281Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.950315Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:58.004835Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:22:58.005051Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-27T07:22:58.005139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.654854Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655110Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.655213Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655362Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmibcmd8-7mfwh Pod phase: Running level=info timestamp=2018-07-27T07:56:10.672166Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T07:56:10.673337Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T07:56:10.675304Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T07:56:20.690889Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T07:56:20.722895Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmibcmd8" level=info timestamp=2018-07-27T07:56:20.726549Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T07:56:20.726839Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" • Failure [180.486 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with boot order /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170 should be able to boot from selected disk /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 Cirros as first boot [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.005s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 ------------------------------ STEP: defining a VirtualMachineInstance with an Alpine disk STEP: adding a Cirros Disk STEP: setting boot order STEP: starting VirtualMachineInstance STEP: Waiting the VirtualMachineInstance start level=info timestamp=2018-07-27T07:56:07.429935Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmibcmd8-7mfwh" • [SLOW TEST:14.453 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with user-data /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201 without k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202 should retry starting the VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:203 ------------------------------ • [SLOW TEST:16.700 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with user-data /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201 without k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202 should log warning and proceed once the secret is there /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:233 ------------------------------ Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running level=info timestamp=2018-07-27T08:02:02.384910Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:02:03.449219Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:02:03.469772Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:02:03.496759Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 08:02:05 http: TLS handshake error from 10.129.0.1:46708: EOF level=info timestamp=2018-07-27T08:02:12.440835Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 08:02:15 http: TLS handshake error from 10.129.0.1:46716: EOF level=info timestamp=2018-07-27T08:02:22.492065Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 08:02:25 http: TLS handshake error from 10.129.0.1:46724: EOF level=info timestamp=2018-07-27T08:02:32.537829Z pos=filter.go:46 component=virt-api 
remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:02:33.560746Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:02:33.562447Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:02:33.596336Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 08:02:35 http: TLS handshake error from 10.129.0.1:46732: EOF level=info timestamp=2018-07-27T08:02:36.928720Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 Pod name: virt-api-7d79764579-vhlzj Pod phase: Running level=info timestamp=2018-07-27T08:00:36.911206Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 08:00:39 http: TLS handshake error from 10.129.0.1:50874: EOF 2018/07/27 08:00:49 http: TLS handshake error from 10.129.0.1:50882: EOF 2018/07/27 08:00:59 http: TLS handshake error from 10.129.0.1:50890: EOF level=info timestamp=2018-07-27T08:01:06.915535Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 08:01:09 http: TLS handshake error from 10.129.0.1:50898: EOF 2018/07/27 08:01:19 http: TLS handshake error from 10.129.0.1:50906: EOF 2018/07/27 08:01:29 http: TLS handshake error from 10.129.0.1:50914: EOF 2018/07/27 08:01:39 http: TLS handshake error from 10.129.0.1:50924: EOF 2018/07/27 08:01:49 http: TLS handshake error from 10.129.0.1:50932: EOF 2018/07/27 08:01:59 http: TLS handshake error from 10.129.0.1:50940: EOF level=info timestamp=2018-07-27T08:02:06.948106Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 08:02:09 http: TLS handshake error from 10.129.0.1:50948: EOF 2018/07/27 08:02:19 http: TLS handshake error from 10.129.0.1:50956: EOF 2018/07/27 08:02:29 http: TLS handshake error from 10.129.0.1:50964: EOF Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T07:56:06.508738Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqc2st\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqc2st, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 18b60a24-9172-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqc2st" level=info timestamp=2018-07-27T07:56:06.660630Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibcmd8 kind= uid=8448ab57-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:56:06.660766Z pos=preset.go:165 
component=virt-controller service=http namespace=kubevirt-test-default name=testvmibcmd8 kind= uid=8448ab57-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:59:06.980307Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmibcmd8\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmibcmd8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8448ab57-9172-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmibcmd8" level=info timestamp=2018-07-27T07:59:07.139751Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5p48z kind= uid=efdca661-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:59:07.140258Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5p48z kind= uid=efdca661-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:59:07.235386Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi5p48z\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi5p48z" level=info timestamp=2018-07-27T07:59:07.250748Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi5p48z\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi5p48z" level=info timestamp=2018-07-27T07:59:21.595824Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:59:21.596025Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:59:21.805155Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi78wkr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi78wkr" level=info timestamp=2018-07-27T07:59:38.294619Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmix8xjm kind= uid=026ec280-9173-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:59:38.294775Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmix8xjm kind= uid=026ec280-9173-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:59:38.411387Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmix8xjm\": the object has been 
modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmix8xjm" level=info timestamp=2018-07-27T07:59:38.445232Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmix8xjm\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmix8xjm" Pod name: virt-controller-7d57d96b65-rhwrp Pod phase: Running level=info timestamp=2018-07-27T07:11:02.455094Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T07:59:37.594186Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:59:37.618282Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmi78wkr" level=info timestamp=2018-07-27T07:59:37.836877Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:59:37.837034Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:59:37.837062Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:59:37.838304Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:59:37.838644Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:59:37.838790Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:59:37.838842Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:59:37.838904Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:59:37.841607Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:59:37.892288Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:59:37.892461Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-27T07:59:37.892551Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:59:37.892617Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T07:22:57.730664Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.733025Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmit855lrzfrx" level=info timestamp=2018-07-27T07:22:57.947399Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:57.946489Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.949021Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:22:57.949147Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.949281Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.950315Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:58.004835Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:22:58.005051Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.005139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.654854Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655110Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.655213Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-27T07:22:58.655362Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmix8xjm-4pqtn Pod phase: Running level=info timestamp=2018-07-27T07:59:41.867552Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T07:59:41.868764Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T07:59:41.871530Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T07:59:51.880197Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T07:59:51.918625Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmix8xjm" level=info timestamp=2018-07-27T07:59:51.920519Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T07:59:51.920681Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" • Failure [180.506 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 when virt-launcher crashes /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:281 should be stopped and have Failed phase [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:282 Timed out after 90.005s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 ------------------------------ level=info timestamp=2018-07-27T07:59:38.869732Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmix8xjm-4pqtn" Pod name: disks-images-provider-vs2cw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-xjp4h Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-qkfk6 Pod phase: Running level=info timestamp=2018-07-27T08:05:04.014257Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:05:04.129467Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:05:04.134027Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 08:05:05 http: TLS handshake error from 10.129.0.1:46854: EOF level=info timestamp=2018-07-27T08:05:13.487781Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 08:05:15 http: TLS handshake error from 10.129.0.1:46862: EOF level=info timestamp=2018-07-27T08:05:20.273228Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- 
method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T08:05:20.274492Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-27T08:05:23.531703Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 08:05:25 http: TLS handshake error from 10.129.0.1:46870: EOF level=info timestamp=2018-07-27T08:05:33.571074Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:05:34.070839Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:05:34.223590Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-27T08:05:34.233235Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/27 08:05:35 http: TLS handshake error from 10.129.0.1:46878: EOF Pod name: virt-api-7d79764579-vhlzj Pod phase: Running 2018/07/27 08:03:49 http: TLS handshake error from 10.129.0.1:51028: EOF 2018/07/27 08:03:59 http: TLS handshake error from 10.129.0.1:51036: EOF level=info timestamp=2018-07-27T08:04:06.914623Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 08:04:09 http: TLS handshake error from 10.129.0.1:51044: EOF 2018/07/27 08:04:19 http: TLS handshake error from 10.129.0.1:51052: EOF 2018/07/27 08:04:29 http: TLS handshake error from 10.129.0.1:51060: EOF level=info timestamp=2018-07-27T08:04:36.919725Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 08:04:39 http: TLS handshake error from 10.129.0.1:51070: EOF 2018/07/27 08:04:49 http: TLS handshake error from 10.129.0.1:51078: EOF 2018/07/27 08:04:59 http: TLS handshake error from 10.129.0.1:51086: EOF level=info timestamp=2018-07-27T08:05:06.898892Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/27 08:05:09 http: TLS handshake error from 10.129.0.1:51094: EOF 2018/07/27 08:05:19 http: TLS handshake error from 10.129.0.1:51102: EOF 2018/07/27 08:05:29 http: TLS handshake error from 10.129.0.1:51110: EOF level=info timestamp=2018-07-27T08:05:36.932109Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 Pod name: virt-controller-7d57d96b65-ghzq5 Pod phase: Running level=info timestamp=2018-07-27T07:56:06.660766Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibcmd8 kind= uid=8448ab57-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info 
timestamp=2018-07-27T07:59:06.980307Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmibcmd8\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmibcmd8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8448ab57-9172-11e8-96ba-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmibcmd8" level=info timestamp=2018-07-27T07:59:07.139751Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5p48z kind= uid=efdca661-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:59:07.140258Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5p48z kind= uid=efdca661-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:59:07.235386Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi5p48z\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi5p48z" level=info timestamp=2018-07-27T07:59:07.250748Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi5p48z\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi5p48z" level=info timestamp=2018-07-27T07:59:21.595824Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:59:21.596025Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:59:21.805155Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi78wkr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi78wkr" level=info timestamp=2018-07-27T07:59:38.294619Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmix8xjm kind= uid=026ec280-9173-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T07:59:38.294775Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmix8xjm kind= uid=026ec280-9173-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-27T07:59:38.411387Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmix8xjm\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmix8xjm" level=info timestamp=2018-07-27T07:59:38.445232Z 
pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmix8xjm\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmix8xjm" level=info timestamp=2018-07-27T08:02:38.809496Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7gdnr kind= uid=6e060b23-9173-11e8-96ba-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-27T08:02:38.810950Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7gdnr kind= uid=6e060b23-9173-11e8-96ba-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-controller-7d57d96b65-rhwrp Pod phase: Running level=info timestamp=2018-07-27T07:11:02.455094Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-45mmt Pod phase: Running level=info timestamp=2018-07-27T07:59:37.594186Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:59:37.618282Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmi78wkr" level=info timestamp=2018-07-27T07:59:37.836877Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind= uid=f87a9b1c-9172-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:59:37.837034Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:59:37.837062Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:59:37.838304Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:59:37.838644Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:59:37.838790Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:59:37.838842Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:59:37.838904Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:59:37.841607Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:59:37.892288Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-27T07:59:37.892461Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:59:37.892551Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:59:37.892617Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi78wkr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-d6ndr Pod phase: Running level=info timestamp=2018-07-27T07:22:57.730664Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.733025Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmit855lrzfrx" level=info timestamp=2018-07-27T07:22:57.947399Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:57.946489Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind= uid=d7c7e1d8-916d-11e8-96ba-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.949021Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-27T07:22:57.949147Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-27T07:22:57.949281Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:57.950315Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-27T07:22:58.004835Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-27T07:22:58.005051Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.005139Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-27T07:22:58.654854Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655110Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-27T07:22:58.655213Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-27T07:22:58.655362Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmit855lrzfrx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmi7gdnr-nkvpf Pod phase: Running level=info timestamp=2018-07-27T08:02:42.379537Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-07-27T08:02:42.380233Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-27T08:02:42.381631Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-27T08:02:52.390143Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-27T08:02:52.454423Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmi7gdnr" level=info timestamp=2018-07-27T08:02:52.456690Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-27T08:02:52.456877Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" • Failure [180.484 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 when virt-handler crashes /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:304 should recover and continue management [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:305 Timed out after 90.004s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 ------------------------------ level=info timestamp=2018-07-27T08:02:39.400753Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmi7gdnr-nkvpf" • [SLOW TEST:24.309 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 when virt-handler is responsive /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:335 should indicate that a node is ready for vmis /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:336 ------------------------------ • [SLOW TEST:82.626 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 when virt-handler is not responsive /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:366 the node controller should react /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:405 ------------------------------ • [SLOW TEST:18.328 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with node tainted /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:458 the vmi with tolerations should be scheduled /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:480 ------------------------------ • ------------------------------ S [SKIPPING] [0.266 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with non default namespace /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:530 should log libvirt start and stop lifecycle events of the domain /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 kubevirt-test-default [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Skip log query tests for JENKINS ci test environment /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:535 ------------------------------ S [SKIPPING] [0.080 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with non default namespace /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:530 should log libvirt start and stop lifecycle events of the domain /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 kubevirt-test-alternative [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Skip log query tests for JENKINS ci test environment /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:535 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.117 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 VirtualMachineInstance Emulation Mode /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:591 should enable emulation in virt-launcher [BeforeEach] 
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:603

      Software emulation is not enabled on this cluster
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:599
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.179 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:591
      should be reflected in domain XML [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:640

      Software emulation is not enabled on this cluster
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:599
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.080 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:591
      should request a TUN device but not KVM [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:684

      Software emulation is not enabled on this cluster
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:599
------------------------------
••••
panic: test timed out after 1h30m0s

goroutine 11020 [running]:
testing.(*M).startAlarm.func1()
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1240 +0xfc
created by time.goFunc
    /gimme/.gimme/versions/go1.10.linux.amd64/src/time/sleep.go:172 +0x44

goroutine 1 [chan receive, 90 minutes]:
testing.(*T).Run(0xc42089ce10, 0x139cbc3, 0x9, 0x142ef00, 0x4801e6)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:825 +0x301
testing.runTests.func1(0xc42089cd20)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1063 +0x64
testing.tRunner(0xc42089cd20, 0xc420389df8)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0
testing.runTests(0xc4204b9b00, 0x1d2fa50, 0x1, 0x1, 0x412009)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1061 +0x2c4
testing.(*M).Run(0xc4208d4280, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:978 +0x171
main.main()
    _testmain.go:44 +0x151

goroutine 5 [chan receive]:
kubevirt.io/kubevirt/vendor/github.com/golang/glog.(*loggingT).flushDaemon(0x1d5b280)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:879 +0x8b
created by kubevirt.io/kubevirt/vendor/github.com/golang/glog.init.0
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:410 +0x203

goroutine 7 [syscall, 90 minutes]:
os/signal.signal_recv(0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/sigqueue.go:139 +0xa6
os/signal.loop()
    /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:22 +0x22
created by os/signal.init.0
    /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:28 +0x41

goroutine 9 [sleep]:
time.Sleep(0xb106c68)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/time.go:102 +0x166
kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol.realClock.Sleep(0xb106c68)
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol/throttle.go:66 +0x2b
kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol.(*tokenBucketRateLimiter).Accept(0xc420997100)
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol/throttle.go:91 +0xbd
kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest.(*Request).tryThrottle(0xc420989e00)
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest/request.go:478 +0x1fd
kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest.(*Request).Do(0xc420989e00, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest/request.go:733 +0x62
kubevirt.io/kubevirt/pkg/kubecli.(*vmis).Get(0xc420b22720, 0xc4207637b4, 0xc, 0xc420eda800, 0xc420b22720, 0x8, 0x7f4234cefd90)
    /root/go/src/kubevirt.io/kubevirt/pkg/kubecli/vmi.go:369 +0x125
kubevirt.io/kubevirt/tests.waitForVMIStart.func1(0x0)
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1039 +0xc2
reflect.Value.call(0x1189560, 0xc42080cf60, 0x13, 0x1396c8e, 0x4, 0xc420e8ec68, 0x0, 0x0, 0x1189560, 0x1189560, ...)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/reflect/value.go:447 +0x969
reflect.Value.Call(0x1189560, 0xc42080cf60, 0x13, 0xc420e8ec68, 0x0, 0x0, 0x44b21b, 0xc42045a7d8, 0xc420e8eca0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/reflect/value.go:308 +0xa4
kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).pollActual(0xc420ecbe80, 0x0, 0x0, 0x0, 0x0)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:71 +0x9f
kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).match(0xc420ecbe80, 0x14c29c0, 0xc420419460, 0x412801, 0xc420419470, 0x1, 0x1, 0xc420419470)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:141 +0x305
kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).Should(0xc420ecbe80, 0x14c29c0, 0xc420419460, 0xc420419470, 0x1, 0x1, 0xc420ecbe80)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:48 +0x62
kubevirt.io/kubevirt/tests.waitForVMIStart(0x14b86e0, 0xc420e04500, 0x5a, 0x0, 0x0, 0x1d79901)
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1049 +0x6d6
kubevirt.io/kubevirt/tests.WaitForSuccessfulVMIStart(0x14b86e0, 0xc420e04500, 0x1d79938, 0x0)
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1072 +0x43
kubevirt.io/kubevirt/tests_test.glob..func16.5.1()
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:841 +0x19d
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).runSync(0xc4208d8d20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:113 +0x9c
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).run(0xc4208d8d20, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:64 +0x13e
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*ItNode).Run(0xc42051b180, 0x14b4b80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go:26 +0x7f
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).runSample(0xc420424f00, 0x0, 0x14b4b80, 0xc420059500)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:203 +0x648
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).Run(0xc420424f00, 0x14b4b80, 0xc420059500)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:138 +0xff
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpec(0xc42010bcc0, 0xc420424f00, 0x0)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:200 +0x10d
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpecs(0xc42010bcc0, 0x1)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:170 +0x329
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run(0xc42010bcc0, 0xb)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:66 +0x11b
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite.(*Suite).Run(0xc4200ceb40, 0x7f4234ca4110, 0xc42089ce10, 0x139f1a6, 0xb, 0xc4204b9b40, 0x2, 0x2, 0x14d1400, 0xc420059500, ...)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite/suite.go:62 +0x27c
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithCustomReporters(0x14b5be0, 0xc42089ce10, 0x139f1a6, 0xb, 0xc4204b9b20, 0x2, 0x2, 0x2)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:221 +0x258
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithDefaultAndCustomReporters(0x14b5be0, 0xc42089ce10, 0x139f1a6, 0xb, 0xc4204b3c00, 0x1, 0x1, 0x1)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:209 +0xab
kubevirt.io/kubevirt/tests_test.TestTests(0xc42089ce10)
    /root/go/src/kubevirt.io/kubevirt/tests/tests_suite_test.go:43 +0xaa
testing.tRunner(0xc42089ce10, 0x142ef00)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0
created by testing.(*T).Run
    /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:824 +0x2e0
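Goroutine 9 above is the spec goroutine: it is still inside the Eventually poll (waitForVMIStart at utils.go:1049), and the top frames show each poll passing through client-go's token-bucket rate limiter, whose Accept() sleeps until a token is available. A small self-contained illustration of that throttling follows; the QPS and burst values are invented for the example and are not the ones the test suite or the KubeVirt client actually configures.

// Illustration of the client-go flowcontrol throttling visible in goroutine 9:
// Accept() blocks once the burst is spent, which is the time.Sleep at the top of
// that stack. The QPS/burst values here are arbitrary example values.
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10) // assumed values: 5 QPS, burst of 10

	for i := 0; i < 20; i++ {
		start := time.Now()
		limiter.Accept() // blocks until the token bucket admits the request
		fmt.Printf("request %d admitted after %v\n", i, time.Since(start))
	}
}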
goroutine 10 [chan receive, 90 minutes]:
kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).registerForInterrupts(0xc42010bcc0, 0xc420735aa0)
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:223 +0xd1
created by kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:60 +0x88

goroutine 11 [select, 90 minutes, locked to thread]:
runtime.gopark(0x14310d8, 0x0, 0x13996e5, 0x6, 0x18, 0x1)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/proc.go:291 +0x11a
runtime.selectgo(0xc420481750, 0xc420735b60)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/select.go:392 +0xe50
runtime.ensureSigM.func1()
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/signal_unix.go:549 +0x1f4
runtime.goexit()
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/asm_amd64.s:2361 +0x1

goroutine 38 [IO wait]:
internal/poll.runtime_pollWait(0x7f4234c93f00, 0x72, 0xc42008b850)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/netpoll.go:173 +0x57
internal/poll.(*pollDesc).wait(0xc420793918, 0x72, 0xffffffffffffff00, 0x14b6da0, 0x1c467d0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:85 +0x9b
internal/poll.(*pollDesc).waitRead(0xc420793918, 0xc420a7c000, 0x8000, 0x8000)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:90 +0x3d
internal/poll.(*FD).Read(0xc420793900, 0xc420a7c000, 0x8000, 0x8000, 0x0, 0x0, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_unix.go:157 +0x17d
net.(*netFD).Read(0xc420793900, 0xc420a7c000, 0x8000, 0x8000, 0x0, 0x8, 0x7ffb)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/net/fd_unix.go:202 +0x4f
net.(*conn).Read(0xc4204ee830, 0xc420a7c000, 0x8000, 0x8000, 0x0, 0x0, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/net/net.go:176 +0x6a
crypto/tls.(*block).readFromUntil(0xc4206c8420, 0x7f4234ca41e0, 0xc4204ee830, 0x5, 0xc4204ee830, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:493 +0x96
crypto/tls.(*Conn).readRecord(0xc420436000, 0x1431217, 0xc420436120, 0x20)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:595 +0xe0
crypto/tls.(*Conn).Read(0xc420436000, 0xc42068b000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:1156 +0x100
bufio.(*Reader).Read(0xc4200aa5a0, 0xc4208be2d8, 0x9, 0x9, 0xc420a242f8, 0xc420f205e0, 0xc42008bd10)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/bufio/bufio.go:216 +0x238
io.ReadAtLeast(0x14b3980, 0xc4200aa5a0, 0xc4208be2d8, 0x9, 0x9, 0x9, 0xc42008bce0, 0xc42008bce0, 0x406614)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:309 +0x86
io.ReadFull(0x14b3980, 0xc4200aa5a0, 0xc4208be2d8, 0x9, 0x9, 0xc420a242a0, 0xc42008bd10, 0xc400004901)
    /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:327 +0x58
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.readFrameHeader(0xc4208be2d8, 0x9, 0x9, 0x14b3980, 0xc4200aa5a0, 0x0, 0xc400000000, 0x7efa2d, 0xc42008bfb0)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:237 +0x7b
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Framer).ReadFrame(0xc4208be2a0, 0xc420398db0, 0x0, 0x0, 0x0)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:492 +0xa4
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*clientConnReadLoop).run(0xc42008bfb0, 0x142fe58, 0xc42047b7b0)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1428 +0x8e
kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*ClientConn).readLoop(0xc4200d6ea0)
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1354 +0x76
created by kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Transport).newClientConn
    /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:579 +0x651

goroutine 6688 [chan send, 31 minutes]:
kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/watch.(*StreamWatcher).receive(0xc420680f60)
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go:114 +0x114
created by kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/watch.NewStreamWatcher
    /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go:60 +0xa8

goroutine 5418 [chan send, 44 minutes]:
kubevirt.io/kubevirt/tests_test.glob..func23.1.2.1.1(0x14ef100, 0xc42061d180, 0xc4204ee068, 0xc42071d500, 0xc4207a4320, 0xc4207a4330)
    /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:81 +0x138
created by kubevirt.io/kubevirt/tests_test.glob..func23.1.2.1
    /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:73 +0x386
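The last two goroutines are the notable leftovers: goroutine 6688 is a client-go StreamWatcher that has been blocked on a channel send for 31 minutes, and goroutine 5418 is a helper started from vnc_test.go:73 that has been blocked on a channel send for 44 minutes, i.e. both are writing to a channel whose reader went away. The generic shape of that leak, and the usual way out of it, looks roughly like the sketch below; this illustrates the pattern only and is not the actual vnc_test or StreamWatcher code.

// Generic "chan send" leak: the producer blocks forever once the consumer stops
// reading. Giving the producer a done channel lets it exit instead of leaking.
package main

import "time"

func leakyProducer(out chan<- string) {
	for {
		out <- "frame" // blocks forever once nobody receives
	}
}

func cancellableProducer(out chan<- string, done <-chan struct{}) {
	for {
		select {
		case out <- "frame":
		case <-done:
			return // unblocks and exits when the caller tears down
		}
	}
}

func main() {
	ch := make(chan string)
	done := make(chan struct{})
	go cancellableProducer(ch, done)
	<-ch        // consume one item, then stop reading
	close(done) // signal the producer to exit rather than leak
	time.Sleep(10 * time.Millisecond)
}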
make: *** [functest] Error 2
+ make cluster-down
./cluster/down.sh