+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ [[ openshift-3.10-release =~ openshift-.* ]]
+ [[ openshift-3.10-release =~ .*-crio-.* ]]
+ export KUBEVIRT_PROVIDER=os-3.10.0
+ KUBEVIRT_PROVIDER=os-3.10.0
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT SIGINT SIGTERM SIGSTOP
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading .......
Downloading .......
2018/08/04 10:49:34 Waiting for host: 192.168.66.102:22
2018/08/04 10:49:37 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/08/04 10:49:45 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/08/04 10:49:53 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/08/04 10:49:58 Connected to tcp://192.168.66.102:22
+ systemctl stop origin-node.service
+ rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/
++ docker ps -q
+ containers=
+ '[' -n '' ']'
++ docker ps -q -a
+ containers='2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3'
+ '[' -n '2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3' ']'
+ docker rm -f 2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3
2cfbef31c987
e183c40c07dc
861f604efed4
12902ad26342
028539b1f68b
bd6f07c1906c
d1f95a33a226
c43f96b6da26
e007e5cfd226
b42e2bceca6e
00531aec6f9a
e4ad39ba6cef
504c3df6bbf4
eb1ec0b445ce
b8955b91e8e5
f739ed8f3e59
07668d85ab3a
a6045d125d7b
2ce17110e009
b45f64ab28ef
3a15945be9e1
2a0af99ae1d1
0ece927846d7
0202d5f5dfae
8ce743769d8f
2efb36567bd8
96b65c0493c5
e9ce89fa30e3
2018/08/04 10:50:04 Waiting for host: 192.168.66.101:22
2018/08/04 10:50:07 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/08/04 10:50:15 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/08/04 10:50:23 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/08/04 10:50:31 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/08/04 10:50:36 Connected to tcp://192.168.66.101:22
+ inventory_file=/root/inventory
+ openshift_ansible=/root/openshift-ansible
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=1.25 ms
--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.259/1.259/1.259/0.000 ms
Found node02. Adding it to the inventory.
+ '[' 0 -ne 0 ']'
+ nodes_found=true
+ set -e
+ echo '192.168.66.102 node02'
+ echo 'Found node02. Adding it to the inventory.'
+ echo 'node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102'
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable
--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms
+ '[' 1 -ne 0 ']'
+ break
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml
PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Evaluate oo_nodes_to_bootstrap]
****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) PLAY [Ensure there are new_nodes] ********************************************** TASK [fail] ******************************************************************** skipping: [localhost] TASK [fail] ******************************************************************** skipping: [localhost] PLAY [Initialization Checkpoint Start] ***************************************** TASK [Set install initialization 'In Progress'] ******************************** ok: [node01] PLAY [Populate config host groups] ********************************************* TASK [Load group name mapping variables] *************************************** ok: [localhost] TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] ********* skipping: [localhost] TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_lb_hosts required] *********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts required] ********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts is single host] **************************** skipping: [localhost] TASK [Evaluate groups - g_glusterfs_hosts required] **************************** skipping: [localhost] TASK [Evaluate oo_all_hosts] *************************************************** ok: [localhost] => (item=node01) ok: [localhost] => (item=node02) TASK [Evaluate oo_masters] ***************************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_master] ************************************************ ok: [localhost] TASK [Evaluate oo_new_etcd_to_config] ****************************************** TASK [Evaluate oo_masters_to_config] ******************************************* ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_to_config] ********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_etcd] ************************************************** ok: [localhost] TASK [Evaluate oo_etcd_hosts_to_upgrade] *************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_hosts_to_backup] **************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_nodes_to_config] ********************************************* ok: [localhost] => (item=node02) TASK [Evaluate oo_nodes_to_bootstrap] ****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** 
TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) [WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config PLAY [Ensure that all non-node hosts are accessible] *************************** TASK [Gathering Facts] ********************************************************* ok: [node01] PLAY [Initialize basic host facts] ********************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02 TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] *** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : debug] ************************************ skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_stats] ******************************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] *** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02 included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02 TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Standardize on latest variable names] ***** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Normalize openshift_release] ************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02 TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* skipping: [node01] => (item=openshift_hosted_etcd_storage_kind) skipping: [node02] => (item=openshift_hosted_etcd_storage_kind) TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK 
[openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : ensure provider configuration variables are defined] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure removed web console extension variables are not set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that web console port matches API server port] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : At least one master is schedulable] ******* skipping: [node01] skipping: [node02] TASK [Detecting Operating System from ostree_booted] *************************** ok: [node02] ok: [node01] TASK [set openshift_deployment_type if unset] ********************************** skipping: [node01] skipping: [node02] TASK [check for node already bootstrapped] ************************************* ok: [node02] ok: [node01] TASK [initialize_facts set fact openshift_is_bootstrapped] ********************* ok: [node01] ok: [node02] TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] *** ok: [node01] ok: [node02] TASK [Determine Atomic Host Docker Version] ************************************ skipping: [node01] skipping: [node02] TASK [assert atomic host docker version is 1.12 or later] ********************** skipping: [node01] skipping: [node02] PLAY [Retrieve existing master configs and validate] *************************** TASK [openshift_control_plane : stat] ****************************************** ok: [node01] TASK [openshift_control_plane : slurp] ***************************************** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] *** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Initialize special first-master variables] ******************************* TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Disable web console if required] ***************************************** TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Setup yum repositories for all hosts] ************************************ TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Install Red Hat Subscription manager] ******************* skipping: [node02] TASK [rhel_subscribe : Is host already registered?] 
**************************** skipping: [node02] TASK [rhel_subscribe : Register host] ****************************************** skipping: [node02] TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] *********** skipping: [node02] TASK [rhel_subscribe : Attach to OpenShift Pool] ******************************* skipping: [node02] TASK [rhel_subscribe : Satellite preparation] ********************************** skipping: [node02] TASK [openshift_repos : openshift_repos detect ostree] ************************* ok: [node02] TASK [openshift_repos : Ensure libselinux-python is installed] ***************** ok: [node02] TASK [openshift_repos : Remove openshift_additional.repo file] ***************** ok: [node02] TASK [openshift_repos : Create any additional repos that are defined] ********** TASK [openshift_repos : include_tasks] ***************************************** skipping: [node02] TASK [openshift_repos : include_tasks] ***************************************** included: /root/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02 TASK [openshift_repos : Configure origin gpg keys] ***************************** ok: [node02] TASK [openshift_repos : Configure correct origin release repository] *********** ok: [node02] => (item=/root/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2) TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] *** changed: [node02] => { "msg": "First run of openshift_repos" } TASK [openshift_repos : Record that openshift_repos already ran] *************** ok: [node02] RUNNING HANDLER [openshift_repos : refresh cache] ****************************** changed: [node02] PLAY [Install packages necessary for installer] ******************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Determine if chrony is installed] **************************************** changed: [node02] [WARNING]: Consider using the yum, dnf or zypper module rather than running rpm. If you need to use command because yum, dnf or zypper is insufficient you can add warn=False to this command task or set command_warnings=False in ansible.cfg to get rid of this message. 
TASK [Install ntp package] ***************************************************** skipping: [node02] TASK [Start and enable ntpd/chronyd] ******************************************* changed: [node02] TASK [Ensure openshift-ansible installer package deps are installed] *********** ok: [node02] => (item=iproute) ok: [node02] => (item=dbus-python) ok: [node02] => (item=PyYAML) ok: [node02] => (item=python-ipaddress) ok: [node02] => (item=libsemanage-python) ok: [node02] => (item=yum-utils) ok: [node02] => (item=python-docker) PLAY [Initialize cluster facts] ************************************************ TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [get openshift_current_version] ******************************************* ok: [node02] ok: [node01] TASK [set_fact openshift_portal_net if present on masters] ********************* ok: [node01] ok: [node02] TASK [Gather Cluster facts] **************************************************** changed: [node02] changed: [node01] TASK [Set fact of no_proxy_internal_hostnames] ********************************* skipping: [node01] skipping: [node02] TASK [Initialize openshift.node.sdn_mtu] *************************************** changed: [node02] ok: [node01] PLAY [Initialize etcd host variables] ****************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role : openshift_version] **************************************** TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** ok: [node01] TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] => { "msg": "openshift_pkg_version was not defined. 
Falling back to -3.10.0" } TASK [openshift_version : set_fact] ******************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Verify Requirements] ***************************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [Run variable sanity checks] ********************************************** ok: [node01] TASK [Validate openshift_node_groups and openshift_node_group_name] ************ ok: [node01] PLAY [Initialization Checkpoint End] ******************************************* TASK [Set install initialization 'Complete'] *********************************** ok: [node01] PLAY [Validate node hostnames] ************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Query DNS for IP address of node02] ************************************** ok: [node02] TASK [Validate openshift_hostname when defined] ******************************** skipping: [node02] TASK [Validate openshift_ip exists on node when defined] *********************** skipping: [node02] PLAY [Configure os_firewall] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [os_firewall : Detecting Atomic Host Operating System] ******************** ok: [node02] TASK [os_firewall : Set fact r_os_firewall_is_atomic] ************************** ok: [node02] TASK [os_firewall : Fail - Firewalld is not supported on Atomic Host] ********** skipping: [node02] TASK [os_firewall : Install firewalld packages] ******************************** skipping: [node02] TASK [os_firewall : Ensure iptables services are not enabled] ****************** skipping: [node02] => (item=iptables) skipping: [node02] => (item=ip6tables) TASK [os_firewall : Wait 10 seconds after disabling iptables] ****************** skipping: [node02] TASK [os_firewall : Start and enable firewalld service] ************************ skipping: [node02] TASK 
[os_firewall : need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail] *** skipping: [node02] TASK [os_firewall : Restart polkitd] ******************************************* skipping: [node02] TASK [os_firewall : Wait for polkit action to have been created] *************** skipping: [node02] TASK [os_firewall : Ensure firewalld service is not enabled] ******************* ok: [node02] TASK [os_firewall : Wait 10 seconds after disabling firewalld] ***************** skipping: [node02] TASK [os_firewall : Install iptables packages] ********************************* ok: [node02] => (item=iptables) ok: [node02] => (item=iptables-services) TASK [os_firewall : Start and enable iptables service] ************************* ok: [node02 -> node02] => (item=node02) TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] *** skipping: [node02] PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [container_runtime : Setup the docker-storage for overlay] **************** skipping: [node02] TASK [container_runtime : Create file system on extra volume device] *********** TASK [container_runtime : Create mount entry for extra volume] ***************** PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** ok: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* ok: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ****************** skipping: [node02] TASK [container_runtime : Add https_proxy to /etc/atomic.conf] ***************** skipping: [node02] TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ******************** skipping: [node02] TASK [container_runtime : Get current installed Docker version] **************** ok: [node02] TASK [container_runtime : Error out if Docker pre-installed but too old] ******* skipping: [node02] TASK [container_runtime : Error out if requested Docker is too old] ************ skipping: [node02] TASK [container_runtime : Install Docker] ************************************** skipping: [node02] TASK [container_runtime : Ensure 
docker.service.d directory exists] ************ ok: [node02] TASK [container_runtime : Configure Docker service unit file] ****************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Set registry params] ********************************* skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []}) TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] *** skipping: [node02] TASK [container_runtime : Set Proxy Settings] ********************************** skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''}) TASK [container_runtime : Set various Docker options] ************************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Configure Docker Network OPTIONS] ******************** ok: [node02] TASK [container_runtime : Detect if docker is already started] ***************** ok: [node02] TASK [container_runtime : Start the Docker service] **************************** ok: [node02] TASK [container_runtime : set_fact] ******************************************** ok: [node02] TASK [container_runtime : Check for docker_storage_path/overlay2] ************** ok: [node02] TASK [container_runtime : Fixup SELinux permissions for docker] **************** changed: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* ok: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** ok: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** ok: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] TASK [container_runtime : Fail if Atomic Host since this is an rpm request] **** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create 
systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Check that overlay is in the kernel] ***************** skipping: [node02] TASK [container_runtime : Add overlay to modprobe.d] *************************** skipping: [node02] TASK [container_runtime : Manually modprobe overlay into the kernel] *********** skipping: [node02] TASK [container_runtime : Enable and start systemd-modules-load] *************** skipping: [node02] TASK [container_runtime : Install cri-o] *************************************** skipping: [node02] TASK [container_runtime : Remove CRI-O default configuration files] ************ skipping: [node02] => (item=/etc/cni/net.d/200-loopback.conf) skipping: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf) TASK [container_runtime : Create the CRI-O configuration] ********************** skipping: [node02] TASK [container_runtime : Ensure CNI configuration directory exists] *********** skipping: [node02] TASK [container_runtime : Add iptables allow rules] **************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove iptables rules] ******************************* TASK [container_runtime : Add firewalld allow rules] *************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove firewalld allow rules] ************************ TASK [container_runtime : Configure the CNI network] *************************** skipping: [node02] TASK [container_runtime : Create /etc/sysconfig/crio-network] ****************** skipping: [node02] TASK [container_runtime : Start the CRI-O service] ***************************** skipping: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* skipping: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** skipping: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** skipping: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role : openshift_version] **************************************** TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** skipping: [node01] 
TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Node Preparation Checkpoint Start] *************************************** TASK [Set Node preparation 'In Progress'] ************************************** ok: [node01] PLAY [Only target nodes that have not yet been bootstrapped] ******************* TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [add_host] **************************************************************** skipping: [localhost] => (item=node02) ok: [localhost] => (item=node01) PLAY [Disable excluders] ******************************************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02 TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-docker-excluder version detected] 
************ skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-docker-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-excluder version detected] ******************* skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** skipping: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] PLAY [Configure nodes] ********************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_cloud_provider : Set cloud provider facts] ********************* skipping: [node02] TASK [openshift_cloud_provider : Create cloudprovider config dir] ************** skipping: [node02] TASK [openshift_cloud_provider : include the defined cloud provider files] ***** skipping: [node02] TASK [openshift_node : fail] *************************************************** skipping: [node02] TASK [openshift_node : Check for NetworkManager service] *********************** ok: [node02] TASK [openshift_node : Set fact using_network_manager] ************************* ok: [node02] TASK [openshift_node : Install dnsmasq] **************************************** ok: [node02] TASK [openshift_node : ensure origin/node directory exists] ******************** changed: [node02] => (item=/etc/origin) 
changed: [node02] => (item=/etc/origin/node) TASK [openshift_node : Install NetworkManager during node_bootstrap provisioning] *** skipping: [node02] TASK [openshift_node : Install network manager dispatch script] **************** skipping: [node02] TASK [openshift_node : Install dnsmasq configuration] ************************** ok: [node02] TASK [openshift_node : Deploy additional dnsmasq.conf] ************************* skipping: [node02] TASK [openshift_node : Enable dnsmasq] ***************************************** ok: [node02] TASK [openshift_node : Install network manager dispatch script] **************** ok: [node02] TASK [openshift_node : Add iptables allow rules] ******************************* ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) ok: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove iptables rules] ********************************** TASK [openshift_node : Add firewalld allow rules] ****************************** skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) skipping: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove firewalld allow rules] *************************** TASK [openshift_node : Checking for journald.conf] ***************************** ok: [node02] TASK [openshift_node : Create journald persistence directories] **************** ok: [node02] TASK [openshift_node : Update journald setup] ********************************** ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'}) ok: [node02] => (item={u'var': u'Compress', u'val': True}) ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000}) ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'}) ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'}) ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'}) ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'}) ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'}) ok: [node02] => (item={u'var': u'ForwardToSyslog', 
u'val': False}) ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False}) TASK [openshift_node : Restart journald] *************************************** skipping: [node02] TASK [openshift_node : Disable swap] ******************************************* ok: [node02] TASK [openshift_node : Install node, clients, and conntrack packages] ********** ok: [node02] => (item={u'name': u'origin-node-3.10.0*'}) ok: [node02] => (item={u'name': u'origin-clients-3.10.0*'}) ok: [node02] => (item={u'name': u'conntrack-tools'}) TASK [openshift_node : Restart cri-o] ****************************************** skipping: [node02] TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] *** changed: [node02] TASK [openshift_node : sysctl] ************************************************* ok: [node02] TASK [openshift_node : Check for credentials file for registry auth] *********** skipping: [node02] TASK [openshift_node : Create credentials for registry auth] ******************* skipping: [node02] TASK [openshift_node : Create credentials for registry auth (alternative)] ***** skipping: [node02] TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] *** skipping: [node02] TASK [openshift_node : Check that node image is present] *********************** changed: [node02] TASK [openshift_node : Pre-pull node image] ************************************ skipping: [node02] TASK [openshift_node : Copy node script to the node] *************************** ok: [node02] TASK [openshift_node : Install Node service file] ****************************** ok: [node02] TASK [openshift_node : Ensure old system path is set] ************************** skipping: [node02] => (item=/etc/origin/openvswitch) skipping: [node02] => (item=/var/lib/kubelet) skipping: [node02] => (item=/opt/cni/bin) TASK [openshift_node : Check status of node image pre-pull] ******************** skipping: [node02] TASK [openshift_node : Copy node container image to ostree storage] ************ skipping: [node02] TASK [openshift_node : Install or Update node system container] **************** skipping: [node02] TASK [openshift_node : Restart network manager to ensure networking configuration is in place] *** skipping: [node02] TASK [openshift_node : Configure Node settings] ******************************** ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS='}) ok: [node02] => (item={u'regex': u'^DEBUG_LOGLEVEL=', u'line': u'DEBUG_LOGLEVEL=2'}) ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.10.0-rc.0'}) TASK [openshift_node : Configure Proxy Settings] ******************************* skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='}) skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='}) skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'}) TASK [openshift_node : file] *************************************************** skipping: [node02] TASK [openshift_node : Create the Node config] ********************************* changed: [node02] TASK [openshift_node : Configure Node Environment Variables] ******************* TASK [openshift_node : Ensure the node static pod directory exists] ************ changed: [node02] TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : Check status of node image pre-pull] 
******************** skipping: [node02] TASK [openshift_node : Install NFS storage plugin dependencies] **************** ok: [node02] TASK [openshift_node : Check for existence of nfs sebooleans] ****************** ok: [node02] => (item=virt_use_nfs) ok: [node02] => (item=virt_sandbox_use_nfs) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-04 10:58:21.615010', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.024961', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-08-04 10:58:21.590049', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-04 10:58:22.803759', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.014904', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-08-04 10:58:22.788855', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-04 10:58:21.615010', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.024961', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-08-04 10:58:21.590049', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-04 10:58:22.803759', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.014904', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-08-04 
10:58:22.788855', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install GlusterFS storage plugin dependencies] ********** ok: [node02] TASK [openshift_node : Check for existence of fusefs sebooleans] *************** ok: [node02] => (item=virt_use_fusefs) ok: [node02] => (item=virt_sandbox_use_fusefs) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-04 10:58:29.290827', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.015826', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-08-04 10:58:29.275001', '_ansible_ignore_errors': None, 'failed': False}) ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-04 10:58:30.445494', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.015746', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-08-04 10:58:30.429748', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-04 10:58:29.290827', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.015826', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-08-04 10:58:29.275001', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-04 10:58:30.445494', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.015746', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, 
u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-08-04 10:58:30.429748', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install Ceph storage plugin dependencies] *************** ok: [node02] TASK [openshift_node : Install iSCSI storage plugin dependencies] ************** ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=device-mapper-multipath) TASK [openshift_node : restart services] *************************************** ok: [node02] => (item=multipathd) ok: [node02] => (item=rpcbind) ok: [node02] => (item=iscsid) TASK [openshift_node : Template multipath configuration] *********************** changed: [node02] TASK [openshift_node : Enable and start multipath] ***************************** changed: [node02] TASK [tuned : Check for tuned package] ***************************************** ok: [node02] TASK [tuned : Set tuned OpenShift variables] *********************************** ok: [node02] TASK [tuned : Ensure directory structure exists] ******************************* ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': 
u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Ensure files are populated from templates] *********************** skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Make tuned use the 
recommended tuned profile on restart] ********* changed: [node02] => (item=/etc/tuned/active_profile) changed: [node02] => (item=/etc/tuned/profile_mode) TASK [tuned : Restart tuned service] ******************************************* changed: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] ******* ok: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] *** PLAY [node bootstrap config] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_node : install needed rpm(s)] ********************************** ok: [node02] => (item=origin-node) ok: [node02] => (item=origin-docker-excluder) ok: [node02] => (item=ansible) ok: [node02] => (item=bash-completion) ok: [node02] => (item=docker) ok: [node02] => (item=haproxy) ok: [node02] => (item=dnsmasq) ok: [node02] => (item=ntp) ok: [node02] => (item=logrotate) ok: [node02] => (item=httpd-tools) ok: [node02] => (item=bind-utils) ok: [node02] => (item=firewalld) ok: [node02] => (item=libselinux-python) ok: [node02] => (item=conntrack-tools) ok: [node02] => (item=openssl) ok: [node02] => (item=iproute) ok: [node02] => (item=python-dbus) ok: [node02] => (item=PyYAML) ok: [node02] => (item=yum-utils) ok: [node02] => (item=glusterfs-fuse) ok: [node02] => (item=device-mapper-multipath) ok: [node02] => (item=nfs-utils) ok: [node02] => (item=cockpit-ws) ok: [node02] => (item=cockpit-system) ok: [node02] => (item=cockpit-bridge) ok: [node02] => (item=cockpit-docker) ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=ceph-common) TASK [openshift_node : create the directory for node] ************************** skipping: [node02] TASK [openshift_node : laydown systemd override] ******************************* skipping: [node02] TASK [openshift_node : update the sysconfig to have necessary variables] ******* ok: [node02] => (item={u'regexp': u'^KUBECONFIG=.*', u'line': u'KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig'}) TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : disable origin-node service] **************************** changed: [node02] => (item=origin-node.service) TASK [openshift_node : Check for RPM generated config marker file .config_managed] *** ok: [node02] TASK [openshift_node : create directories for bootstrapping] ******************* ok: [node02] => (item=/root/openshift_bootstrap) changed: [node02] => (item=/var/lib/origin/openshift.local.config) changed: [node02] => (item=/var/lib/origin/openshift.local.config/node) ok: [node02] => (item=/etc/docker/certs.d/docker-registry.default.svc:5000) TASK [openshift_node : laydown the bootstrap.yml file for on boot configuration] *** ok: [node02] TASK [openshift_node : Create a symlink to the node client CA for the docker registry] *** ok: [node02] TASK [openshift_node : Remove RPM generated config files if present] *********** skipping: [node02] => (item=master) skipping: [node02] => (item=.config_managed) TASK [openshift_node : find all files in /etc/origin/node so we can remove them] *** skipping: [node02] TASK [openshift_node : Remove everything except the resolv.conf required for node] *** skipping: [node02] TASK [openshift_node_group : create node config template] ********************** changed: [node02] TASK [openshift_node_group : remove existing node config] 
********************** changed: [node02] TASK [openshift_node_group : Ensure required directories are present] ********** ok: [node02] => (item=/etc/origin/node/pods) changed: [node02] => (item=/etc/origin/node/certificates) TASK [openshift_node_group : Update the sysconfig to group "node-config-compute"] *** changed: [node02] TASK [set_fact] **************************************************************** ok: [node02] PLAY [Re-enable excluder if it was previously enabled] ************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] PLAY [Node Preparation Checkpoint End] ***************************************** TASK [Set Node preparation 'Complete'] ***************************************** ok: [node01] PLAY [Distribute bootstrap and start nodes] ************************************ TASK [openshift_node : Gather node information] ******************************** changed: [node02] ok: [node01] TASK [openshift_node : Copy master bootstrap config locally] ******************* ok: [node02] TASK [openshift_node : Distribute bootstrap kubeconfig if one does not exist] *** ok: [node01] changed: [node02] TASK [openshift_node : Start and enable node for bootstrapping] **************** changed: [node02] changed: [node01] TASK [openshift_node : Get node logs] ****************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : debug] ************************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : fail] *************************************************** skipping: [node02] skipping: [node01] PLAY [Approve any pending CSR requests from inventory nodes] ******************* TASK [Dump all candidate bootstrap hostnames] ********************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Find all hostnames for bootstrapping] ************************************ ok: [node01] TASK [Dump the 
bootstrap hostnames] ******************************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Approve bootstrap nodes] ************************************************* changed: [node01] TASK [Get CSRs] **************************************************************** skipping: [node01] TASK [Report approval errors] ************************************************** skipping: [node01] PLAY [Ensure any inventory labels are applied to the nodes] ******************** TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [openshift_manage_node : Wait for master API to become available before proceeding] *** skipping: [node02] TASK [openshift_manage_node : Wait for Node Registration] ********************** ok: [node02 -> node01] ok: [node01 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02, node01 TASK [openshift_manage_node : Set node schedulability] ************************* ok: [node02 -> node01] ok: [node01 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/set_default_node_role.yml for node02, node01 TASK [openshift_manage_node : Retrieve nodes that are marked with the infra selector or the legacy infra selector] *** ok: [node02 -> node01] TASK [openshift_manage_node : Label infra or legacy infra nodes with the new role label] *** TASK [openshift_manage_node : Retrieve non-infra, non-master nodes that are not yet labeled compute] *** ok: [node02 -> node01] TASK [openshift_manage_node : label non-master non-infra nodes compute] ******** TASK [openshift_manage_node : Label all-in-one master as a compute node] ******* skipping: [node02] PLAY RECAP ********************************************************************* localhost : ok=30 changed=0 unreachable=0 failed=0 node01 : ok=71 changed=3 unreachable=0 failed=0 node02 : ok=155 changed=33 unreachable=0 failed=0 INSTALLER STATUS *************************************************************** Initialization : Complete (0:04:01) Node Preparation : Complete (0:04:11) Sending file modes: C0755 110489328 oc Sending file modes: C0600 5649 admin.kubeconfig Cluster "node01:8443" set. Cluster "node01:8443" set. + set +e + kubectl get nodes --no-headers + cluster/kubectl.sh get nodes --no-headers node01 Ready compute,infra,master 27d v1.10.0+b81c8f8 node02 Ready compute 50s v1.10.0+b81c8f8 + kubectl_rc=0 + '[' 0 -ne 0 ']' ++ kubectl get nodes --no-headers ++ cluster/kubectl.sh get nodes --no-headers ++ grep NotReady + '[' -n '' ']' + set -e + echo 'Nodes are ready:' Nodes are ready: + kubectl get nodes + cluster/kubectl.sh get nodes NAME STATUS ROLES AGE VERSION node01 Ready compute,infra,master 27d v1.10.0+b81c8f8 node02 Ready compute 51s v1.10.0+b81c8f8 + make cluster-sync ./cluster/build.sh Building ... 
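The make cluster-sync step that begins here rebuilds the KubeVirt components, pushes the freshly tagged images to the cluster-local registry, and then redeploys the generated manifests. A minimal sketch of the equivalent manual sequence, reconstructed only from the commands that appear later in this trace (registry ports, tags, and workspace paths differ per run):

    # rebuild the Go binaries and container images inside the build container
    hack/dockerized "./hack/check.sh && ./hack/build-go.sh install" && ./hack/build-copy-artifacts.sh
    hack/build-docker.sh build   # virt-controller, virt-launcher, virt-handler, virt-api, test/demo images
    hack/build-docker.sh push    # push the :devel images to the registry exposed on localhost
    # tear down any previous KubeVirt deployment, then apply the regenerated release manifests
    ./cluster/clean.sh
    ./cluster/deploy.sh
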
Untagged: localhost:33089/kubevirt/virt-controller:devel Untagged: localhost:33089/kubevirt/virt-controller@sha256:838ebc841afb7b2329983cae416367a263f05d99fc688ad651c800c983d81951 Deleted: sha256:78c24a9f8e37da82b8883c3629eac63e3c69bf35436c29446fee90f86374a1a8 Untagged: localhost:33089/kubevirt/virt-launcher:devel Untagged: localhost:33089/kubevirt/virt-launcher@sha256:20a5199c636cc11acc12b9b7fc6425e16a3dae83edab5915cec79a5920176a44 Deleted: sha256:a4f0eb05ca0f21a4288ccd3f32419a8293d0c2e222967715bdde2a224d65339e Untagged: localhost:33089/kubevirt/virt-handler:devel Untagged: localhost:33089/kubevirt/virt-handler@sha256:26f02e3ee8a3ccfb0028b4113d1ff59c3f3fd54b388a7b9f14626304e25c0a96 Deleted: sha256:de94ffd0d300594eeb1f2bd066b07a83d429660ac628a8312357827383abfbcc Untagged: localhost:33089/kubevirt/virt-api:devel Untagged: localhost:33089/kubevirt/virt-api@sha256:557f4272dc42ad137ea742ac227ff9f88c596daf4c7df89b90a1e6ab66f6966f Deleted: sha256:27406f8a58dd100827597e610485701cc9656aea1239775df958fe58e690ca49 Untagged: localhost:33089/kubevirt/subresource-access-test:devel Untagged: localhost:33089/kubevirt/subresource-access-test@sha256:def781cd8e1996755d672feadb16200f6c5a0adde967938c4ae7ec08f277ca78 Deleted: sha256:21d168d0e75023827e5c4e6bf16971dd5b0c412ad3db8979de2be5e5e65cbb75 Untagged: localhost:33089/kubevirt/example-hook-sidecar:devel Untagged: localhost:33089/kubevirt/example-hook-sidecar@sha256:46aa6b65b750b1f8a4d10801b82a36110799871dda1e51f1ebea73b58983a27e Deleted: sha256:e7e3d922bf3b4c3494b5f5fe5f470e00b9099f5da46f95e8162517f8e039240a sha256:ceba12cbc33e4e37a707840478a630db561e2427b78c8c9f9cd6d0b73276ab32 go version go1.10 linux/amd64 go version go1.10 linux/amd64 make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh sha256:ceba12cbc33e4e37a707840478a630db561e2427b78c8c9f9cd6d0b73276ab32 go version go1.10 linux/amd64 go version go1.10 linux/amd64 find: '/root/go/src/kubevirt.io/kubevirt/_out/cmd': No such file or directory Compiling tests... 
compiled tests.test hack/build-docker.sh build Sending build context to Docker daemon 40.39 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> b4f3251c6468 Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> 813752072d9d Step 5/8 : USER 1001 ---> Using cache ---> 88b3556f36b4 Step 6/8 : COPY virt-controller /usr/bin/virt-controller ---> 40a0171b0ddf Removing intermediate container df281a28a5db Step 7/8 : ENTRYPOINT /usr/bin/virt-controller ---> Running in d0f2474479cc ---> 9f48d221e152 Removing intermediate container d0f2474479cc Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-controller" '' ---> Running in 1fe0cf0e1bc2 ---> 567df56a86f6 Removing intermediate container 1fe0cf0e1bc2 Successfully built 567df56a86f6 Sending build context to Docker daemon 43.32 MB Step 1/10 : FROM kubevirt/libvirt:4.2.0 ---> 5f0bfe81a3e0 Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 09010a005182 Step 3/10 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool net-tools sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> fc9481693838 Step 4/10 : COPY virt-launcher /usr/bin/virt-launcher ---> 4c4d2257f03e Removing intermediate container 4878c9d115c7 Step 5/10 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> 57efc779ae44 Removing intermediate container 095d3688ad74 Step 6/10 : RUN setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64 ---> Running in e672f0efab5d  ---> b0fb7e237180 Removing intermediate container e672f0efab5d Step 7/10 : RUN mkdir -p /usr/share/kubevirt/virt-launcher ---> Running in 7c77911d131f  ---> 3dac89ecc482 Removing intermediate container 7c77911d131f Step 8/10 : COPY entrypoint.sh libvirtd.sh sock-connector /usr/share/kubevirt/virt-launcher/ ---> 227f95d76f23 Removing intermediate container eeb9e8f14e54 Step 9/10 : ENTRYPOINT /usr/share/kubevirt/virt-launcher/entrypoint.sh ---> Running in fb5946ae53b8 ---> daf8fb4fc399 Removing intermediate container fb5946ae53b8 Step 10/10 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-launcher" '' ---> Running in e034976f7b41 ---> eca2bd90051a Removing intermediate container e034976f7b41 Successfully built eca2bd90051a Sending build context to Docker daemon 38.45 MB Step 1/5 : FROM fedora:28 ---> cc510acfcd70 Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/5 : COPY virt-handler /usr/bin/virt-handler ---> 1e40bb9722ca Removing intermediate container 1e0a74fcbf19 Step 4/5 : ENTRYPOINT /usr/bin/virt-handler ---> Running in 7d85a151b860 ---> 32e3741d04c8 Removing intermediate container 7d85a151b860 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-handler" '' ---> Running in 21ef98fb65e0 ---> 0b64e0167faa Removing intermediate container 21ef98fb65e0 Successfully built 0b64e0167faa Sending build context to Docker daemon 38.81 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api ---> Using cache ---> 3cff23506e80 Step 4/8 : WORKDIR /home/virt-api ---> Using cache ---> e94c5606b96b Step 5/8 : USER 1001 ---> Using cache ---> af16317199f5 Step 6/8 : COPY virt-api /usr/bin/virt-api ---> a945cbab7e1c Removing 
intermediate container 8d524dff7511 Step 7/8 : ENTRYPOINT /usr/bin/virt-api ---> Running in 19082e69fb77 ---> bdc4502f4e7d Removing intermediate container 19082e69fb77 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-api" '' ---> Running in 902007f7afd6 ---> 2b1cf7002d76 Removing intermediate container 902007f7afd6 Successfully built 2b1cf7002d76 Sending build context to Docker daemon 4.096 kB Step 1/7 : FROM fedora:28 ---> cc510acfcd70 Step 2/7 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/7 : ENV container docker ---> Using cache ---> aed3ca4ac3a3 Step 4/7 : RUN mkdir -p /images/custom /images/alpine && truncate -s 64M /images/custom/disk.img && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/alpine/disk.img ---> Using cache ---> c7d0cf8fc982 Step 5/7 : ADD entrypoint.sh / ---> Using cache ---> 0393e5ee0c37 Step 6/7 : CMD /entrypoint.sh ---> Using cache ---> 23798f49dea3 Step 7/7 : LABEL "disks-images-provider" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 628bfca144bf Successfully built 628bfca144bf Sending build context to Docker daemon 2.56 kB Step 1/5 : FROM fedora:28 ---> cc510acfcd70 Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/5 : ENV container docker ---> Using cache ---> aed3ca4ac3a3 Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> d8c990eaf575 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "vm-killer" '' ---> Using cache ---> 2ed275c4bfd0 Successfully built 2ed275c4bfd0 Sending build context to Docker daemon 5.12 kB Step 1/7 : FROM debian:sid ---> 68f33cf86aab Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> 50fc79ebe51c Step 3/7 : ENV container docker ---> Using cache ---> b8e063496923 Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> 8adb1572b35c Step 5/7 : ADD entry-point.sh / ---> Using cache ---> 8c0c5a52e4df Step 6/7 : CMD /entry-point.sh ---> Using cache ---> 1a4b838e5dee Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "registry-disk-v1alpha" '' ---> Using cache ---> 7aa3fd44f8c9 Successfully built 7aa3fd44f8c9 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:33339/kubevirt/registry-disk-v1alpha:devel ---> 7aa3fd44f8c9 Step 2/4 : MAINTAINER "David Vossel" \ ---> Using cache ---> 5e0c3d37503b Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Using cache ---> 2acb8de4d71e Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 89f88bb54bf2 Successfully built 89f88bb54bf2 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:33339/kubevirt/registry-disk-v1alpha:devel ---> 7aa3fd44f8c9 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 776bfb123af4 Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2 ---> Using cache ---> 288211d2b493 Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 0912477735f2 Successfully built 0912477735f2 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM 
localhost:33339/kubevirt/registry-disk-v1alpha:devel ---> 7aa3fd44f8c9 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 776bfb123af4 Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso ---> Using cache ---> c0c8be599bed Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> f4b34e404811 Successfully built f4b34e404811 Sending build context to Docker daemon 35.59 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl ---> Using cache ---> d74088d7a4fc Step 4/8 : WORKDIR /home/virtctl ---> Using cache ---> c8c857bf8d96 Step 5/8 : USER 1001 ---> Using cache ---> 36730a67b946 Step 6/8 : COPY subresource-access-test /subresource-access-test ---> 21dc031bff7c Removing intermediate container 827eb50004fb Step 7/8 : ENTRYPOINT /subresource-access-test ---> Running in f62a30ecf17d ---> 7a4e43d77581 Removing intermediate container f62a30ecf17d Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "subresource-access-test" '' ---> Running in 001334361e28 ---> 4f886e2690ea Removing intermediate container 001334361e28 Successfully built 4f886e2690ea Sending build context to Docker daemon 3.072 kB Step 1/9 : FROM fedora:28 ---> cc510acfcd70 Step 2/9 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40 Step 3/9 : ENV container docker ---> Using cache ---> aed3ca4ac3a3 Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all ---> Using cache ---> 6050b24a5d85 Step 5/9 : ENV GIMME_GO_VERSION 1.9.2 ---> Using cache ---> 0447d2178073 Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> 291db82d955f Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 793556477837 Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli ---> Using cache ---> fd5c6e1f9461 Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "winrmcli" '' ---> Using cache ---> 91d1be1bcbe4 Successfully built 91d1be1bcbe4 Sending build context to Docker daemon 36.8 MB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 71a8c548e503 Step 3/5 : COPY example-hook-sidecar /example-hook-sidecar ---> 1089a3b4adbc Removing intermediate container 302ec67c2c3d Step 4/5 : ENTRYPOINT /example-hook-sidecar ---> Running in 4e8fa0f7de24 ---> c474aec4ff0f Removing intermediate container 4e8fa0f7de24 Step 5/5 : LABEL "example-hook-sidecar" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Running in 8b24859be78b ---> 45dbecfbfc21 Removing intermediate container 8b24859be78b Successfully built 45dbecfbfc21 hack/build-docker.sh push The push refers to a repository [localhost:33339/kubevirt/virt-controller] 7878a469e1f1: Preparing b2f5abdac324: Preparing 891e1e4ef82a: Preparing b2f5abdac324: Pushed 7878a469e1f1: Pushed 891e1e4ef82a: Pushed devel: digest: sha256:e632fac8cfaefd788dacbfd149b17d14e02dc43570c2a095c9ced6437a47ced7 size: 949 The push refers to a repository [localhost:33339/kubevirt/virt-launcher] 08729bd26fdb: Preparing 5c7040a8a41f: Preparing eebca04d4fcc: Preparing 839657ddc9cd: Preparing af0ab1540c23: Preparing 0b99c4111657: Preparing da38cf808aa5: 
Preparing b83399358a92: Preparing 186d8b3e4fd8: Preparing fa6154170bf5: Preparing 5eefb9960a36: Preparing 891e1e4ef82a: Preparing 0b99c4111657: Waiting 891e1e4ef82a: Waiting fa6154170bf5: Waiting da38cf808aa5: Waiting b83399358a92: Waiting 186d8b3e4fd8: Waiting 5eefb9960a36: Waiting 08729bd26fdb: Pushed 839657ddc9cd: Pushed 5c7040a8a41f: Pushed da38cf808aa5: Pushed b83399358a92: Pushed 186d8b3e4fd8: Pushed fa6154170bf5: Pushed 891e1e4ef82a: Mounted from kubevirt/virt-controller eebca04d4fcc: Pushed 0b99c4111657: Pushed af0ab1540c23: Pushed 5eefb9960a36: Pushed devel: digest: sha256:6e4b7ba8a9a639b7aed01d760a8eec97c9f938287882d6b2b676b8bf55dd182c size: 2828 The push refers to a repository [localhost:33339/kubevirt/virt-handler] a7eece844cc1: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-launcher a7eece844cc1: Pushed devel: digest: sha256:c0cbf62a4b527e9dab153b3e95722e0e0c66bee14a830e3a90ada2ac1df090d8 size: 740 The push refers to a repository [localhost:33339/kubevirt/virt-api] adf73408e3af: Preparing afd1d781e4d1: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-handler afd1d781e4d1: Pushed adf73408e3af: Pushed devel: digest: sha256:bd3d7df0278e7d56a4a1150fb36aa0bdbe849e502a3183332ee5297e3988d77c size: 948 The push refers to a repository [localhost:33339/kubevirt/disks-images-provider] dc0875c44573: Preparing 8fc77a44094f: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-api dc0875c44573: Pushed 8fc77a44094f: Pushed devel: digest: sha256:d23d8d42ec6e15ae7ed6e778918aafb30b1527dcab703a192077860ecf796c74 size: 948 The push refers to a repository [localhost:33339/kubevirt/vm-killer] d1b69e768421: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/disks-images-provider d1b69e768421: Pushed devel: digest: sha256:e18b0719b6c92415bd3a9d4e45278bb4a4f7bccefbd3fe8c958aad9b913bc32c size: 740 The push refers to a repository [localhost:33339/kubevirt/registry-disk-v1alpha] 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 2a15632f54d4: Pushed 91a924e03d7c: Pushed 25edbec0eaea: Pushed devel: digest: sha256:93dbd4b6c598eae77e68f8119e129d092b75cfe0573a46c653a4578391b54edd size: 948 The push refers to a repository [localhost:33339/kubevirt/cirros-registry-disk-demo] f287bddc58c9: Preparing 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 2a15632f54d4: Mounted from kubevirt/registry-disk-v1alpha 91a924e03d7c: Mounted from kubevirt/registry-disk-v1alpha 25edbec0eaea: Mounted from kubevirt/registry-disk-v1alpha f287bddc58c9: Pushed devel: digest: sha256:d84ec6e1c3b1e790318b351a867571430b0f77022b609bf72c7edc11774869a2 size: 1160 The push refers to a repository [localhost:33339/kubevirt/fedora-cloud-registry-disk-demo] 191bddb21627: Preparing 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 2a15632f54d4: Mounted from kubevirt/cirros-registry-disk-demo 91a924e03d7c: Mounted from kubevirt/cirros-registry-disk-demo 25edbec0eaea: Mounted from kubevirt/cirros-registry-disk-demo 191bddb21627: Pushed devel: digest: sha256:721c5dc3b73e50b865b6d395e48884382c391509e18b4d77a3a27456a1eea65c size: 1161 The push refers to a repository [localhost:33339/kubevirt/alpine-registry-disk-demo] 8a362b640dc9: Preparing 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 91a924e03d7c: Mounted from kubevirt/fedora-cloud-registry-disk-demo 25edbec0eaea: Mounted from kubevirt/fedora-cloud-registry-disk-demo 2a15632f54d4: Mounted from 
kubevirt/fedora-cloud-registry-disk-demo 8a362b640dc9: Pushed devel: digest: sha256:6c9639e0cb8ed67572ed78aad285cce752608f39802ce49856474162feae16f5 size: 1160 The push refers to a repository [localhost:33339/kubevirt/subresource-access-test] 192ea8b726bb: Preparing 4052ce9d0aff: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/vm-killer 4052ce9d0aff: Pushed 192ea8b726bb: Pushed devel: digest: sha256:1a64851cb47b9b9675dffde80bae5b7f3dfa4bb8d18f962dfbc9dd8018b63acb size: 948 The push refers to a repository [localhost:33339/kubevirt/winrmcli] 64ccc7ac4271: Preparing 4242962b50c3: Preparing 0e374d8c733e: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/subresource-access-test 64ccc7ac4271: Pushed 0e374d8c733e: Pushed 4242962b50c3: Pushed devel: digest: sha256:7ba212e34e7bbac39ae9d54624462c338a98987d0eb9f59f8bb24b123847d8b4 size: 1165 The push refers to a repository [localhost:33339/kubevirt/example-hook-sidecar] 1ced8cd501c8: Preparing 39bae602f753: Preparing 1ced8cd501c8: Pushed 39bae602f753: Pushed devel: digest: sha256:8a1a42072821fd92cba8babc82dcfb8c663a47134522e155f25a5e3c3e9bde7a size: 740 make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' Done ./cluster/clean.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-190-g6daf72e ++ KUBEVIRT_VERSION=v0.7.0-190-g6daf72e + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix 
docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:33339/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Cleaning up ...' Cleaning up ... + cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers + grep foregroundDeleteVirtualMachine + read p error: the server doesn't have a resource type "vmis" + _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0 No resources found + namespaces=(default ${namespace}) + for i in '${namespaces[@]}' + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete deployment -l kubevirt.io No resources found + _kubectl -n default delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rs -l kubevirt.io No resources found + _kubectl -n default delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete services -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + 
cluster/os-3.10.0/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n default delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete secrets -l kubevirt.io No resources found + _kubectl -n default delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pv -l kubevirt.io No resources found + _kubectl -n default delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pvc -l kubevirt.io No resources found + _kubectl -n default delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete ds -l kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n default delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pods -l kubevirt.io No resources found + _kubectl -n default delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n default delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rolebinding -l kubevirt.io No resources found + _kubectl -n default delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete roles -l kubevirt.io No resources found + _kubectl -n default delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterroles -l kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + for i in '${namespaces[@]}' + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + 
KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete services -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n kube-system delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete secrets -l kubevirt.io No resources found + _kubectl -n kube-system delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pv -l kubevirt.io No resources found + _kubectl -n kube-system delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pvc -l kubevirt.io No resources found + _kubectl -n kube-system delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete ds -l kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n kube-system delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pods -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete roles -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system 
delete clusterroles -l kubevirt.io No resources found + _kubectl -n kube-system delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + sleep 2 + echo Done Done ./cluster/deploy.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-190-g6daf72e ++ KUBEVIRT_VERSION=v0.7.0-190-g6daf72e + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha 
images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:33339/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Deploying ...' Deploying ... + [[ -z openshift-3.10-release ]] + [[ openshift-3.10-release =~ .*-dev ]] + [[ openshift-3.10-release =~ .*-release ]] + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]] + continue + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]] + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml clusterrole.rbac.authorization.k8s.io "kubevirt.io:admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:edit" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:view" created serviceaccount "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver-auth-delegator" created rolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created role.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-controller" created serviceaccount "kubevirt-controller" created serviceaccount "kubevirt-privileged" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller-cluster-admin" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-privileged-cluster-admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:default" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt.io:default" created service "virt-api" created deployment.extensions "virt-api" created deployment.extensions "virt-controller" created daemonset.extensions "virt-handler" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstances.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancereplicasets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io 
"virtualmachineinstancepresets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachines.kubevirt.io" created + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R persistentvolumeclaim "disk-alpine" created persistentvolume "host-path-disk-alpine" created persistentvolumeclaim "disk-custom" created persistentvolume "host-path-disk-custom" created daemonset.extensions "disks-images-provider" created serviceaccount "kubevirt-testing" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-testing-cluster-admin" created + [[ os-3.10.0 =~ os-* ]] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"] + _kubectl adm policy add-scc-to-user privileged admin + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged admin scc "privileged" added to: ["admin"] + echo Done Done + namespaces=(kube-system default) + [[ kube-system != \k\u\b\e\-\s\y\s\t\e\m ]] + timeout=300 + sample=30 + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n 'disks-images-provider-mgvbf 0/1 ContainerCreating 0 1s disks-images-provider-pknmj 0/1 ContainerCreating 0 1s virt-api-7d79764579-ksjrq 0/1 ContainerCreating 0 2s virt-api-7d79764579-lbcmq 0/1 ContainerCreating 0 2s virt-controller-7d57d96b65-csf74 0/1 ContainerCreating 0 2s virt-controller-7d57d96b65-k6hk6 0/1 ContainerCreating 0 2s virt-handler-n99ls 0/1 ContainerCreating 0 2s virt-handler-tt462 0/1 ContainerCreating 0 2s' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... 
+ kubectl get pods -n kube-system --no-headers + grep -v Running + cluster/kubectl.sh get pods -n kube-system --no-headers disks-images-provider-mgvbf 0/1 ContainerCreating 0 2s disks-images-provider-pknmj 0/1 ContainerCreating 0 2s virt-api-7d79764579-ksjrq 0/1 ContainerCreating 0 3s virt-api-7d79764579-lbcmq 0/1 ContainerCreating 0 3s virt-controller-7d57d96b65-csf74 0/1 ContainerCreating 0 3s virt-controller-7d57d96b65-k6hk6 0/1 ContainerCreating 0 3s virt-handler-n99ls 0/1 ContainerCreating 0 3s virt-handler-tt462 0/1 ContainerCreating 0 3s + sleep 30 + current_time=30 + '[' 30 -gt 300 ']' ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n '' ']' + current_time=0 ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + '[' -n false ']' + echo 'Waiting for KubeVirt containers to become ready ...' Waiting for KubeVirt containers to become ready ... + kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + grep false + cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers false + sleep 30 + current_time=30 + '[' 30 -gt 300 ']' ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false + '[' -n '' ']' + kubectl get pods -n kube-system + cluster/kubectl.sh get pods -n kube-system NAME READY STATUS RESTARTS AGE disks-images-provider-mgvbf 1/1 Running 0 1m disks-images-provider-pknmj 1/1 Running 0 1m master-api-node01 1/1 Running 1 27d master-controllers-node01 1/1 Running 1 27d master-etcd-node01 1/1 Running 1 27d virt-api-7d79764579-ksjrq 1/1 Running 0 1m virt-api-7d79764579-lbcmq 1/1 Running 0 1m virt-controller-7d57d96b65-csf74 1/1 Running 0 1m virt-controller-7d57d96b65-k6hk6 1/1 Running 0 1m virt-handler-n99ls 1/1 Running 0 1m virt-handler-tt462 1/1 Running 0 1m + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n default --no-headers ++ cluster/kubectl.sh get pods -n default --no-headers ++ grep -v Running + '[' -n '' ']' + current_time=0 ++ kubectl get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false ++ cluster/kubectl.sh get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + '[' -n '' ']' + kubectl get pods -n default + cluster/kubectl.sh get pods -n default NAME READY STATUS RESTARTS AGE docker-registry-1-rl562 1/1 Running 1 27d registry-console-1-rw9zf 1/1 Running 1 27d router-1-6cch9 1/1 Running 1 27d + kubectl version + cluster/kubectl.sh version oc v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 features: Basic-Auth GSSAPI Kerberos SPNEGO Server https://127.0.0.1:33336 openshift v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 + ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + [[ openshift-3.10-release =~ windows.* ]] + FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + make functest hack/dockerized "hack/build-func-tests.sh" 
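Once every pod reports Running, the script switches to checking per-container readiness. The one-liner it polls is visible in the trace above; a self-contained sketch of that second wait (again, not the verbatim script) looks like this:

# Poll the per-container ready flags and wait until none of them is still "false".
while kubectl get pods -n kube-system \
        -ocustom-columns=status:status.containerStatuses[*].ready --no-headers \
      | grep -q false; do
  echo "Waiting for KubeVirt containers to become ready ..."
  sleep 30
done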
sha256:ceba12cbc33e4e37a707840478a630db561e2427b78c8c9f9cd6d0b73276ab32 go version go1.10 linux/amd64 Waiting for rsyncd to be ready go version go1.10 linux/amd64 Compiling tests... compiled tests.test hack/functests.sh Running Suite: Tests Suite ========================== Random Seed: 1533380959 Will run 148 of 148 specs •• Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:13:31.799036Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:13:40.090152Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:13:41 http: TLS handshake error from 10.128.0.1:47538: EOF level=info timestamp=2018-08-04T11:13:41.753654Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:13:50.136781Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:13:51 http: TLS handshake error from 10.128.0.1:47586: EOF level=info timestamp=2018-08-04T11:13:53.050801Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:13:56.002675Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T11:14:00.183192Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:14:01 http: TLS handshake error from 10.128.0.1:47636: EOF level=info timestamp=2018-08-04T11:14:01.855768Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:14:10.233824Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:14:11 http: TLS handshake error from 10.128.0.1:47682: EOF level=info timestamp=2018-08-04T11:14:11.802070Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:14:20.278108Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 11:12:07 http: TLS handshake error from 
10.129.0.1:47876: EOF 2018/08/04 11:12:17 http: TLS handshake error from 10.129.0.1:47886: EOF 2018/08/04 11:12:27 http: TLS handshake error from 10.129.0.1:47896: EOF 2018/08/04 11:12:37 http: TLS handshake error from 10.129.0.1:47906: EOF 2018/08/04 11:12:47 http: TLS handshake error from 10.129.0.1:47916: EOF level=info timestamp=2018-08-04T11:12:55.517010Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:12:57 http: TLS handshake error from 10.129.0.1:47926: EOF 2018/08/04 11:13:07 http: TLS handshake error from 10.129.0.1:47936: EOF 2018/08/04 11:13:17 http: TLS handshake error from 10.129.0.1:47948: EOF 2018/08/04 11:13:27 http: TLS handshake error from 10.129.0.1:47958: EOF 2018/08/04 11:13:37 http: TLS handshake error from 10.129.0.1:47968: EOF 2018/08/04 11:13:47 http: TLS handshake error from 10.129.0.1:47978: EOF 2018/08/04 11:13:57 http: TLS handshake error from 10.129.0.1:47988: EOF 2018/08/04 11:14:07 http: TLS handshake error from 10.129.0.1:47998: EOF 2018/08/04 11:14:17 http: TLS handshake error from 10.129.0.1:48008: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:08:00.871669Z pos=virtinformers.go:107 component=virt-controller service=http msg="STARTING informer vmiInformer" level=info timestamp=2018-08-04T11:08:00.871794Z pos=virtinformers.go:107 component=virt-controller service=http msg="STARTING informer kubeVirtPodInformer" level=info timestamp=2018-08-04T11:08:00.871824Z pos=virtinformers.go:107 component=virt-controller service=http msg="STARTING informer kubeVirtNodeInformer" level=info timestamp=2018-08-04T11:08:00.893316Z pos=virtinformers.go:107 component=virt-controller service=http msg="STARTING informer vmiPresetInformer" level=info timestamp=2018-08-04T11:08:00.893353Z pos=virtinformers.go:107 component=virt-controller service=http msg="STARTING informer vmirsInformer" level=info timestamp=2018-08-04T11:08:00.893379Z pos=virtinformers.go:107 component=virt-controller service=http msg="STARTING informer configMapInformer" level=info timestamp=2018-08-04T11:08:00.893394Z pos=virtinformers.go:107 component=virt-controller service=http msg="STARTING informer vmInformer" level=info timestamp=2018-08-04T11:08:00.893409Z pos=virtinformers.go:107 component=virt-controller service=http msg="STARTING informer limitrangeInformer" level=info timestamp=2018-08-04T11:08:00.893495Z pos=vm.go:85 component=virt-controller service=http msg="Starting VirtualMachine controller." level=info timestamp=2018-08-04T11:08:00.903065Z pos=node.go:104 component=virt-controller service=http msg="Starting node controller." level=info timestamp=2018-08-04T11:08:00.922268Z pos=vmi.go:129 component=virt-controller service=http msg="Starting vmi controller." level=info timestamp=2018-08-04T11:08:00.922360Z pos=replicaset.go:111 component=virt-controller service=http msg="Starting VirtualMachineInstanceReplicaSet controller." level=info timestamp=2018-08-04T11:08:00.922433Z pos=preset.go:74 component=virt-controller service=http msg="Starting Virtual Machine Initializer." 
level=info timestamp=2018-08-04T11:09:20.310877Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiw2h25 kind= uid=d63dae3e-97d6-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:09:20.311184Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiw2h25 kind= uid=d63dae3e-97d6-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[228:{} 236:{} 60:{} 61:{} 184:{} 227:{} 235:{} 231:{} 232:{} 59:{} 62:{} 183:{} 144:{} 63:{}] level=error timestamp=2018-08-04T11:13:13.401582Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[184:{} 228:{} 235:{} 236:{} 227:{} 144:{} 231:{} 232:{} 61:{} 183:{} 63:{} 59:{} 60:{} 62:{}] level=error timestamp=2018-08-04T11:13:16.531290Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmiw2h25-9pl9q Pod phase: Pending ------------------------------ • Failure [300.554 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should update VirtualMachine once VMIs are up [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:195 Timed out after 300.000s. 
Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:201 ------------------------------ •• Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:19:01.130873Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:19:01.147170Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:19:01.161078Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:19:01 http: TLS handshake error from 10.128.0.1:49064: EOF level=info timestamp=2018-08-04T11:19:01.745376Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:19:02.402559Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:19:11 http: TLS handshake error from 10.128.0.1:49110: EOF level=info timestamp=2018-08-04T11:19:11.782579Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:19:12.328132Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:19:13.822787Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:19:13.835298Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:19:13.854115Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:19:21 http: TLS handshake error from 10.128.0.1:49156: EOF level=info timestamp=2018-08-04T11:19:21.830723Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:19:23.738128Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running level=info timestamp=2018-08-04T11:17:25.504715Z 
pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:17:27 http: TLS handshake error from 10.129.0.1:48200: EOF 2018/08/04 11:17:37 http: TLS handshake error from 10.129.0.1:48210: EOF 2018/08/04 11:17:47 http: TLS handshake error from 10.129.0.1:48220: EOF level=info timestamp=2018-08-04T11:17:55.492371Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:17:57 http: TLS handshake error from 10.129.0.1:48230: EOF 2018/08/04 11:18:07 http: TLS handshake error from 10.129.0.1:48240: EOF 2018/08/04 11:18:17 http: TLS handshake error from 10.129.0.1:48250: EOF 2018/08/04 11:18:27 http: TLS handshake error from 10.129.0.1:48260: EOF 2018/08/04 11:18:37 http: TLS handshake error from 10.129.0.1:48270: EOF 2018/08/04 11:18:47 http: TLS handshake error from 10.129.0.1:48280: EOF level=info timestamp=2018-08-04T11:18:55.465725Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:18:57 http: TLS handshake error from 10.129.0.1:48290: EOF 2018/08/04 11:19:07 http: TLS handshake error from 10.129.0.1:48300: EOF 2018/08/04 11:19:17 http: TLS handshake error from 10.129.0.1:48312: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:14:20.988963Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmixlq9r kind= uid=8975572d-97d7-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:14:20.989051Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmixlq9r kind= uid=8975572d-97d7-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:14:21.035457Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmixlq9r\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmixlq9r" level=info timestamp=2018-08-04T11:14:21.104095Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmixlq9r\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmixlq9r" level=info timestamp=2018-08-04T11:14:21.126867Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmixlq9r\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmixlq9r" level=info timestamp=2018-08-04T11:14:21.146700Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmixlq9r\": StorageError: invalid object, Code: 4, Key: 
/kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmixlq9r, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8975572d-97d7-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmixlq9r" level=info timestamp=2018-08-04T11:14:23.085720Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihrlk6 kind= uid=8ab5ea92-97d7-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:14:23.086197Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihrlk6 kind= uid=8ab5ea92-97d7-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:14:23.172937Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihrlk6\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihrlk6" level=info timestamp=2018-08-04T11:14:24.091559Z pos=controller_ref_manager.go:291 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihrlk6 kind= uid=8ab5ea92-97d7-11e8-96b4-525500d15501 msg="patching vmi to remove its controllerRef to kubevirt.io/v1alpha2/VirtualMachine:testvmihrlk6" level=info timestamp=2018-08-04T11:14:25.178007Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihrlk6\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmihrlk6, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8ab5ea92-97d7-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihrlk6" level=info timestamp=2018-08-04T11:14:25.366671Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8wbpd kind= uid=8c11e197-97d7-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:14:25.366925Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8wbpd kind= uid=8c11e197-97d7-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:14:25.411152Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8wbpd" level=info timestamp=2018-08-04T11:14:25.427667Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8wbpd" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[232:{} 236:{} 59:{} 231:{} 60:{} 61:{} 184:{} 227:{} 62:{} 
63:{} 183:{} 228:{} 144:{} 235:{}] level=error timestamp=2018-08-04T11:18:10.797114Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[61:{} 184:{} 227:{} 236:{} 183:{} 144:{} 228:{} 63:{} 232:{} 59:{} 235:{} 60:{} 62:{} 231:{}] Pod name: virt-launcher-testvmi8wbpd-dlzgm Pod phase: Pending ------------------------------ • Failure [300.469 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if it gets deleted [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:245 Timed out after 300.000s. Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:157 ------------------------------ STEP: Starting the VirtualMachineInstance STEP: VMI has the running condition Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:20:42.496705Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:20:51 http: TLS handshake error from 10.128.0.1:49582: EOF level=info timestamp=2018-08-04T11:20:52.257188Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:20:53.925805Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:20:55.966158Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:21:01 http: TLS handshake error from 10.128.0.1:49632: EOF level=info timestamp=2018-08-04T11:21:02.301669Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:21:02.609936Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:21:11 http: TLS handshake error from 10.128.0.1:49678: EOF level=info timestamp=2018-08-04T11:21:12.349457Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:21:12.546673Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:21:21 http: TLS 
handshake error from 10.128.0.1:49724: EOF level=info timestamp=2018-08-04T11:21:22.396521Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:21:23.980509Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:21:26.034021Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 11:19:17 http: TLS handshake error from 10.129.0.1:48312: EOF level=info timestamp=2018-08-04T11:19:25.515526Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:19:27 http: TLS handshake error from 10.129.0.1:48322: EOF 2018/08/04 11:19:37 http: TLS handshake error from 10.129.0.1:48332: EOF 2018/08/04 11:19:47 http: TLS handshake error from 10.129.0.1:48342: EOF 2018/08/04 11:19:57 http: TLS handshake error from 10.129.0.1:48352: EOF 2018/08/04 11:20:07 http: TLS handshake error from 10.129.0.1:48362: EOF 2018/08/04 11:20:17 http: TLS handshake error from 10.129.0.1:48372: EOF 2018/08/04 11:20:27 http: TLS handshake error from 10.129.0.1:48382: EOF 2018/08/04 11:20:37 http: TLS handshake error from 10.129.0.1:48392: EOF 2018/08/04 11:20:47 http: TLS handshake error from 10.129.0.1:48402: EOF 2018/08/04 11:20:57 http: TLS handshake error from 10.129.0.1:48412: EOF 2018/08/04 11:21:07 http: TLS handshake error from 10.129.0.1:48422: EOF 2018/08/04 11:21:17 http: TLS handshake error from 10.129.0.1:48432: EOF level=info timestamp=2018-08-04T11:21:25.003797Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:14:21.126867Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmixlq9r\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmixlq9r" level=info timestamp=2018-08-04T11:14:21.146700Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmixlq9r\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmixlq9r, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8975572d-97d7-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmixlq9r" level=info timestamp=2018-08-04T11:14:23.085720Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihrlk6 kind= uid=8ab5ea92-97d7-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info 
timestamp=2018-08-04T11:14:23.086197Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihrlk6 kind= uid=8ab5ea92-97d7-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:14:23.172937Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihrlk6\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihrlk6" level=info timestamp=2018-08-04T11:14:24.091559Z pos=controller_ref_manager.go:291 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihrlk6 kind= uid=8ab5ea92-97d7-11e8-96b4-525500d15501 msg="patching vmi to remove its controllerRef to kubevirt.io/v1alpha2/VirtualMachine:testvmihrlk6" level=info timestamp=2018-08-04T11:14:25.178007Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihrlk6\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmihrlk6, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8ab5ea92-97d7-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihrlk6" level=info timestamp=2018-08-04T11:14:25.366671Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8wbpd kind= uid=8c11e197-97d7-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:14:25.366925Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8wbpd kind= uid=8c11e197-97d7-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:14:25.411152Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8wbpd" level=info timestamp=2018-08-04T11:14:25.427667Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8wbpd" level=info timestamp=2018-08-04T11:19:25.799945Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmivr2l8 kind= uid=3f245e49-97d8-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:19:25.800201Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmivr2l8 kind= uid=3f245e49-97d8-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:19:25.859058Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance 
kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:19:25.873977Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[236:{} 235:{} 62:{} 183:{} 232:{} 59:{} 60:{} 61:{} 184:{} 228:{} 231:{} 227:{} 144:{} 63:{}] level=error timestamp=2018-08-04T11:21:04.969177Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[227:{} 183:{} 144:{} 63:{} 232:{} 59:{} 61:{} 62:{} 235:{} 231:{} 236:{} 60:{} 184:{} 228:{}] Pod name: virt-launcher-testvmivr2l8-dfzhw Pod phase: Pending • Failure [120.422 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:265 Timed out after 120.000s. Expected success, but got an error: <*errors.errorString | 0xc420441370>: { s: "vmi still isn't running", } vmi still isn't running /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:284 ------------------------------ STEP: Creating a new VMI STEP: Waiting for the VMI's VirtualMachineInstance to start Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:25:43.101243Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:25:43.709299Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:25:51 http: TLS handshake error from 10.128.0.1:51020: EOF level=info timestamp=2018-08-04T11:25:53.750976Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:25:54.568635Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:26:01 http: TLS handshake error from 10.128.0.1:51070: EOF level=info timestamp=2018-08-04T11:26:03.201722Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 
statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:26:03.788837Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:26:11 http: TLS handshake error from 10.128.0.1:51116: EOF level=info timestamp=2018-08-04T11:26:13.152622Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:26:13.838148Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:26:21 http: TLS handshake error from 10.128.0.1:51164: EOF level=info timestamp=2018-08-04T11:26:23.885415Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:26:24.628943Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:26:26.022298Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running level=info timestamp=2018-08-04T11:24:25.525543Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:24:27 http: TLS handshake error from 10.129.0.1:48624: EOF 2018/08/04 11:24:37 http: TLS handshake error from 10.129.0.1:48634: EOF 2018/08/04 11:24:47 http: TLS handshake error from 10.129.0.1:48644: EOF level=info timestamp=2018-08-04T11:24:55.510723Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:24:57 http: TLS handshake error from 10.129.0.1:48654: EOF 2018/08/04 11:25:07 http: TLS handshake error from 10.129.0.1:48664: EOF 2018/08/04 11:25:17 http: TLS handshake error from 10.129.0.1:48676: EOF 2018/08/04 11:25:27 http: TLS handshake error from 10.129.0.1:48686: EOF 2018/08/04 11:25:37 http: TLS handshake error from 10.129.0.1:48696: EOF 2018/08/04 11:25:47 http: TLS handshake error from 10.129.0.1:48706: EOF level=info timestamp=2018-08-04T11:25:55.478595Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:25:57 http: TLS handshake error from 10.129.0.1:48716: EOF 2018/08/04 11:26:07 http: TLS handshake error from 10.129.0.1:48726: EOF 2018/08/04 11:26:17 http: TLS handshake error from 10.129.0.1:48736: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:14:23.086197Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihrlk6 kind= uid=8ab5ea92-97d7-11e8-96b4-525500d15501 msg="Marking 
VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:14:23.172937Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihrlk6\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihrlk6" level=info timestamp=2018-08-04T11:14:24.091559Z pos=controller_ref_manager.go:291 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihrlk6 kind= uid=8ab5ea92-97d7-11e8-96b4-525500d15501 msg="patching vmi to remove its controllerRef to kubevirt.io/v1alpha2/VirtualMachine:testvmihrlk6" level=info timestamp=2018-08-04T11:14:25.178007Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihrlk6\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmihrlk6, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8ab5ea92-97d7-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihrlk6" level=info timestamp=2018-08-04T11:14:25.366671Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8wbpd kind= uid=8c11e197-97d7-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:14:25.366925Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8wbpd kind= uid=8c11e197-97d7-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:14:25.411152Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8wbpd" level=info timestamp=2018-08-04T11:14:25.427667Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8wbpd" level=info timestamp=2018-08-04T11:19:25.799945Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmivr2l8 kind= uid=3f245e49-97d8-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:19:25.800201Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmivr2l8 kind= uid=3f245e49-97d8-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:19:25.859058Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:19:25.873977Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io 
\"testvmivr2l8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:21:26.078935Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmivr2l8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 3f245e49-97d8-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:21:26.301877Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiffz6s kind= uid=86f79dc6-97d8-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:21:26.302113Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiffz6s kind= uid=86f79dc6-97d8-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[60:{} 61:{} 227:{} 62:{} 228:{} 59:{} 183:{} 144:{} 184:{} 235:{} 63:{} 236:{} 231:{} 232:{}] level=error timestamp=2018-08-04T11:25:29.643931Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[231:{} 144:{} 60:{} 183:{} 228:{} 236:{} 62:{} 63:{} 59:{} 61:{} 184:{} 227:{} 235:{} 232:{}] level=error timestamp=2018-08-04T11:25:32.747044Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmiffz6s-6cz92 Pod phase: Pending • Failure [300.534 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should stop VirtualMachineInstance if running set to false [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:325 Timed out after 300.000s. 
Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:157 ------------------------------ STEP: Starting the VirtualMachineInstance STEP: VMI has the running condition Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:31:03.716895Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:31:03.735063Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:31:03.838578Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:31:05.260329Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:31:11 http: TLS handshake error from 10.128.0.1:52542: EOF level=info timestamp=2018-08-04T11:31:13.723582Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:31:15.302865Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:31:16.084696Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:31:16.098831Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:31:16.109503Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:31:21 http: TLS handshake error from 10.128.0.1:52590: EOF level=info timestamp=2018-08-04T11:31:25.292399Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:31:25.350289Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:31:26.060103Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T11:31:26.402947Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 
statusCode=404 contentLength=19 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 11:29:37 http: TLS handshake error from 10.129.0.1:48938: EOF 2018/08/04 11:29:47 http: TLS handshake error from 10.129.0.1:48948: EOF level=info timestamp=2018-08-04T11:29:55.475918Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:29:57 http: TLS handshake error from 10.129.0.1:48958: EOF 2018/08/04 11:30:07 http: TLS handshake error from 10.129.0.1:48968: EOF 2018/08/04 11:30:17 http: TLS handshake error from 10.129.0.1:48978: EOF 2018/08/04 11:30:27 http: TLS handshake error from 10.129.0.1:48988: EOF 2018/08/04 11:30:37 http: TLS handshake error from 10.129.0.1:48998: EOF 2018/08/04 11:30:47 http: TLS handshake error from 10.129.0.1:49008: EOF level=info timestamp=2018-08-04T11:30:55.508557Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:30:57 http: TLS handshake error from 10.129.0.1:49018: EOF 2018/08/04 11:31:07 http: TLS handshake error from 10.129.0.1:49028: EOF 2018/08/04 11:31:17 http: TLS handshake error from 10.129.0.1:49040: EOF level=info timestamp=2018-08-04T11:31:25.003783Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:31:27 http: TLS handshake error from 10.129.0.1:49050: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:14:25.178007Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihrlk6\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmihrlk6, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8ab5ea92-97d7-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihrlk6" level=info timestamp=2018-08-04T11:14:25.366671Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8wbpd kind= uid=8c11e197-97d7-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:14:25.366925Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi8wbpd kind= uid=8c11e197-97d7-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:14:25.411152Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8wbpd" level=info timestamp=2018-08-04T11:14:25.427667Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance 
kubevirt-test-default/testvmi8wbpd" level=info timestamp=2018-08-04T11:19:25.799945Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmivr2l8 kind= uid=3f245e49-97d8-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:19:25.800201Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmivr2l8 kind= uid=3f245e49-97d8-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:19:25.859058Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:19:25.873977Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:21:26.078935Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmivr2l8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 3f245e49-97d8-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:21:26.301877Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiffz6s kind= uid=86f79dc6-97d8-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:21:26.302113Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiffz6s kind= uid=86f79dc6-97d8-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:26:26.630499Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiffz6s\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiffz6s, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 86f79dc6-97d8-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiffz6s" level=info timestamp=2018-08-04T11:26:26.811653Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:26:26.811815Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 
vga_arbiter Printing discovered devices map[59:{} 184:{} 228:{} 61:{} 62:{} 144:{} 235:{} 232:{} 236:{} 60:{} 227:{} 231:{} 183:{} 63:{}] level=error timestamp=2018-08-04T11:30:34.000617Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[62:{} 235:{} 63:{} 236:{} 183:{} 231:{} 232:{} 61:{} 184:{} 227:{} 144:{} 59:{} 228:{} 60:{}] level=error timestamp=2018-08-04T11:30:37.081984Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmibdkcm-7kc8d Pod phase: Pending • Failure [301.488 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should start and stop VirtualMachineInstance multiple times [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:333 Timed out after 300.000s. Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:157 ------------------------------ STEP: Doing run: 0 STEP: Starting the VirtualMachineInstance STEP: VMI has the running condition Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:37:04.570647Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:37:04.907396Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:37:04.919806Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:37:04.933926Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:37:06.989566Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:37:11 http: TLS handshake error from 10.128.0.1:54266: EOF level=info timestamp=2018-08-04T11:37:14.445798Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:37:17.034610Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:37:17.274537Z pos=filter.go:46 
component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:37:17.290644Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:37:17.311051Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:37:21 http: TLS handshake error from 10.128.0.1:54312: EOF level=info timestamp=2018-08-04T11:37:26.050412Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T11:37:26.117300Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:37:27.091290Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 11:35:07 http: TLS handshake error from 10.129.0.1:49272: EOF 2018/08/04 11:35:17 http: TLS handshake error from 10.129.0.1:49282: EOF 2018/08/04 11:35:27 http: TLS handshake error from 10.129.0.1:49292: EOF 2018/08/04 11:35:37 http: TLS handshake error from 10.129.0.1:49302: EOF 2018/08/04 11:35:47 http: TLS handshake error from 10.129.0.1:49312: EOF 2018/08/04 11:35:57 http: TLS handshake error from 10.129.0.1:49322: EOF 2018/08/04 11:36:07 http: TLS handshake error from 10.129.0.1:49332: EOF 2018/08/04 11:36:17 http: TLS handshake error from 10.129.0.1:49342: EOF 2018/08/04 11:36:27 http: TLS handshake error from 10.129.0.1:49352: EOF 2018/08/04 11:36:37 http: TLS handshake error from 10.129.0.1:49362: EOF 2018/08/04 11:36:47 http: TLS handshake error from 10.129.0.1:49372: EOF 2018/08/04 11:36:57 http: TLS handshake error from 10.129.0.1:49382: EOF 2018/08/04 11:37:07 http: TLS handshake error from 10.129.0.1:49392: EOF 2018/08/04 11:37:17 http: TLS handshake error from 10.129.0.1:49404: EOF 2018/08/04 11:37:27 http: TLS handshake error from 10.129.0.1:49414: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:14:25.411152Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8wbpd" level=info timestamp=2018-08-04T11:14:25.427667Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi8wbpd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi8wbpd" 
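The repeated virt-controller messages above of the form "Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io ...: the object has been modified; please apply your changes to the latest version and try again" are ordinary Kubernetes optimistic-concurrency conflicts: an update was sent with a stale ResourceVersion, so the API server rejected it and the controller re-enqueued the object. Controllers normally resolve this by re-reading the latest object and reapplying the change. A minimal sketch of that retry pattern using client-go is below; it is illustrative only (the get/update callbacks are hypothetical stand-ins, not the actual virt-controller code):

    // Sketch of the standard conflict-retry pattern from k8s.io/client-go/util/retry.
    // The get/update callbacks are hypothetical stand-ins for a real typed client.
    package main

    import (
        "fmt"

        "k8s.io/client-go/util/retry"
    )

    // updateWithRetry re-reads the newest copy of an object and reapplies the change
    // whenever the API server answers with a ResourceVersion conflict
    // ("the object has been modified; please apply your changes to the latest version").
    func updateWithRetry(get func() (string, error), update func(latest string) error) error {
        return retry.RetryOnConflict(retry.DefaultRetry, func() error {
            latest, err := get() // always start from the latest ResourceVersion
            if err != nil {
                return err
            }
            // If update returns a Conflict error, RetryOnConflict backs off and tries again.
            return update(latest)
        })
    }

    func main() {
        _ = updateWithRetry(
            func() (string, error) { return "vmi@resourceVersion42", nil },
            func(latest string) error { fmt.Println("updating", latest); return nil },
        )
    }
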
level=info timestamp=2018-08-04T11:19:25.799945Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmivr2l8 kind= uid=3f245e49-97d8-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:19:25.800201Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmivr2l8 kind= uid=3f245e49-97d8-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:19:25.859058Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:19:25.873977Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:21:26.078935Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmivr2l8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 3f245e49-97d8-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:21:26.301877Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiffz6s kind= uid=86f79dc6-97d8-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:21:26.302113Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiffz6s kind= uid=86f79dc6-97d8-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:26:26.630499Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiffz6s\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiffz6s, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 86f79dc6-97d8-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiffz6s" level=info timestamp=2018-08-04T11:26:26.811653Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:26:26.811815Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:31:28.254886Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmig2fcb kind= uid=edc22dec-97d9-11e8-96b4-525500d15501 msg="Initializing 
VirtualMachineInstance" level=info timestamp=2018-08-04T11:31:28.255016Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmig2fcb kind= uid=edc22dec-97d9-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:31:28.312986Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" Pod name: virt-handler-n99ls Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[236:{} 231:{} 63:{} 61:{} 183:{} 228:{} 235:{} 232:{} 59:{} 184:{} 144:{} 60:{} 227:{} 62:{}] Pod name: virt-handler-tt462 Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[183:{} 144:{} 235:{} 231:{} 59:{} 62:{} 228:{} 63:{} 236:{} 61:{} 232:{} 60:{} 184:{} 227:{}] Pod name: virt-launcher-testvmig2fcb-hxs7w Pod phase: Pending • Failure [360.432 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should not update the VirtualMachineInstance spec if Running [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:346 Timed out after 360.000s. Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:353 ------------------------------ Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:41:44.916999Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:41:48.327218Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:41:51 http: TLS handshake error from 10.128.0.1:55600: EOF level=info timestamp=2018-08-04T11:41:56.613085Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:41:58.379714Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:42:01 http: TLS handshake error from 10.128.0.1:55650: EOF level=info timestamp=2018-08-04T11:42:05.206576Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:42:08.424310Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET 
url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:42:11 http: TLS handshake error from 10.128.0.1:55696: EOF level=info timestamp=2018-08-04T11:42:14.970183Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:42:18.473436Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:42:21 http: TLS handshake error from 10.128.0.1:55742: EOF level=info timestamp=2018-08-04T11:42:26.062694Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T11:42:26.679020Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:42:28.523454Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 11:40:47 http: TLS handshake error from 10.129.0.1:49616: EOF level=info timestamp=2018-08-04T11:40:55.509822Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:40:57 http: TLS handshake error from 10.129.0.1:49626: EOF 2018/08/04 11:41:07 http: TLS handshake error from 10.129.0.1:49636: EOF 2018/08/04 11:41:17 http: TLS handshake error from 10.129.0.1:49646: EOF level=info timestamp=2018-08-04T11:41:25.482066Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T11:41:25.888919Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:41:27 http: TLS handshake error from 10.129.0.1:49656: EOF 2018/08/04 11:41:37 http: TLS handshake error from 10.129.0.1:49666: EOF 2018/08/04 11:41:47 http: TLS handshake error from 10.129.0.1:49676: EOF level=info timestamp=2018-08-04T11:41:55.530474Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:41:57 http: TLS handshake error from 10.129.0.1:49686: EOF 2018/08/04 11:42:07 http: TLS handshake error from 10.129.0.1:49696: EOF 2018/08/04 11:42:17 http: TLS handshake error from 10.129.0.1:49706: EOF 2018/08/04 11:42:27 http: TLS handshake error from 10.129.0.1:49716: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:19:25.873977Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": the object has been modified; please apply your changes to the latest version and try 
again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:21:26.078935Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivr2l8\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmivr2l8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 3f245e49-97d8-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivr2l8" level=info timestamp=2018-08-04T11:21:26.301877Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiffz6s kind= uid=86f79dc6-97d8-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:21:26.302113Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiffz6s kind= uid=86f79dc6-97d8-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:26:26.630499Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiffz6s\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiffz6s, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 86f79dc6-97d8-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiffz6s" level=info timestamp=2018-08-04T11:26:26.811653Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:26:26.811815Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:31:28.254886Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmig2fcb kind= uid=edc22dec-97d9-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:31:28.255016Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmig2fcb kind= uid=edc22dec-97d9-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:31:28.312986Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.519331Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmig2fcb, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: edc22dec-97d9-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance 
kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.525932Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmig2fcb, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: edc22dec-97d9-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.695130Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiflk66 kind= uid=c498bc50-97da-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:37:28.695266Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiflk66 kind= uid=c498bc50-97da-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:37:28.746038Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiflk66\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiflk66" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[227:{} 183:{} 232:{} 59:{} 61:{} 62:{} 228:{} 236:{} 144:{} 235:{} 231:{} 60:{} 184:{} 63:{}] level=error timestamp=2018-08-04T11:40:56.867505Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[227:{} 62:{} 228:{} 235:{} 232:{} 60:{} 183:{} 59:{} 144:{} 231:{} 63:{} 236:{} 61:{} 184:{}] level=error timestamp=2018-08-04T11:40:59.916845Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmiflk66-9rds4 Pod phase: Pending • Failure [301.451 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should survive guest shutdown, multiple times [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:387 Timed out after 300.000s. 
Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:157 ------------------------------ STEP: Creating new VMI, not running STEP: Starting the VirtualMachineInstance STEP: VMI has the running condition VM testvmi9ttsr was scheduled to start Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:47:45.609167Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:47:50.087704Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:47:51 http: TLS handshake error from 10.128.0.1:57316: EOF level=info timestamp=2018-08-04T11:47:56.002978Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T11:47:57.282002Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:48:00.135919Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:48:01 http: TLS handshake error from 10.128.0.1:57366: EOF level=info timestamp=2018-08-04T11:48:06.291209Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:48:10.184027Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:48:11 http: TLS handshake error from 10.128.0.1:57412: EOF level=info timestamp=2018-08-04T11:48:15.665341Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:48:20.236758Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:48:21 http: TLS handshake error from 10.128.0.1:57458: EOF level=info timestamp=2018-08-04T11:48:27.341585Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:48:30.275865Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 11:46:37 http: TLS handshake error from 10.129.0.1:49970: EOF 2018/08/04 11:46:47 http: TLS handshake error from 
10.129.0.1:49980: EOF level=info timestamp=2018-08-04T11:46:55.535236Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:46:57 http: TLS handshake error from 10.129.0.1:49990: EOF 2018/08/04 11:47:07 http: TLS handshake error from 10.129.0.1:50000: EOF 2018/08/04 11:47:17 http: TLS handshake error from 10.129.0.1:50010: EOF level=info timestamp=2018-08-04T11:47:25.526681Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:47:27 http: TLS handshake error from 10.129.0.1:50020: EOF 2018/08/04 11:47:37 http: TLS handshake error from 10.129.0.1:50030: EOF 2018/08/04 11:47:47 http: TLS handshake error from 10.129.0.1:50040: EOF 2018/08/04 11:47:57 http: TLS handshake error from 10.129.0.1:50050: EOF 2018/08/04 11:48:07 http: TLS handshake error from 10.129.0.1:50060: EOF 2018/08/04 11:48:17 http: TLS handshake error from 10.129.0.1:50070: EOF level=info timestamp=2018-08-04T11:48:25.524564Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:48:27 http: TLS handshake error from 10.129.0.1:50080: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:21:26.302113Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiffz6s kind= uid=86f79dc6-97d8-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:26:26.630499Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiffz6s\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiffz6s, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 86f79dc6-97d8-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiffz6s" level=info timestamp=2018-08-04T11:26:26.811653Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:26:26.811815Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:31:28.254886Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmig2fcb kind= uid=edc22dec-97d9-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:31:28.255016Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmig2fcb kind= uid=edc22dec-97d9-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:31:28.312986Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on 
virtualmachineinstances.kubevirt.io \"testvmig2fcb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.519331Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmig2fcb, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: edc22dec-97d9-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.525932Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmig2fcb, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: edc22dec-97d9-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.695130Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiflk66 kind= uid=c498bc50-97da-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:37:28.695266Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiflk66 kind= uid=c498bc50-97da-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:37:28.746038Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiflk66\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiflk66" level=info timestamp=2018-08-04T11:42:29.986869Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiflk66\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiflk66, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: c498bc50-97da-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiflk66" level=info timestamp=2018-08-04T11:42:30.142768Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9ttsr kind= uid=784644a3-97db-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:42:30.142874Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9ttsr kind= uid=784644a3-97db-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[60:{} 61:{} 183:{} 144:{} 228:{} 231:{} 63:{} 59:{} 235:{} 232:{} 236:{} 227:{} 62:{} 184:{}] 
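The recurring virt-handler error in this log ("tun device does not show up in /proc/misc, is the module loaded?") means the health check scanned the kernel's misc-device table and found no "tun" entry; the numbered device listing printed by virt-handler (59 network_throughput, 60 network_latency, ...) is exactly that table. On the node this usually indicates the tun kernel module is not loaded (loading it, e.g. with modprobe tun, typically makes /dev/net/tun appear). A rough illustration of such a check is sketched below; it is not KubeVirt's actual health.go implementation:

    // Rough illustration of the /proc/misc check the virt-handler error describes;
    // not the actual KubeVirt health.go code.
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    // hasMiscDevice reports whether /proc/misc lists a misc device with the given name.
    // Each line of /proc/misc has the form "<minor> <name>", e.g. "200 tun".
    func hasMiscDevice(name string) (bool, error) {
        f, err := os.Open("/proc/misc")
        if err != nil {
            return false, err
        }
        defer f.Close()

        sc := bufio.NewScanner(f)
        for sc.Scan() {
            fields := strings.Fields(sc.Text())
            if len(fields) == 2 && fields[1] == name {
                return true, nil
            }
        }
        return false, sc.Err()
    }

    func main() {
        ok, err := hasMiscDevice("tun")
        if err != nil {
            fmt.Println("cannot read /proc/misc:", err)
            return
        }
        if !ok {
            fmt.Println("tun device does not show up in /proc/misc, is the module loaded?")
        }
    }
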
level=error timestamp=2018-08-04T11:47:07.473991Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency level=error timestamp=2018-08-04T11:47:10.524999Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[59:{} 227:{} 62:{} 236:{} 183:{} 144:{} 235:{} 60:{} 228:{} 231:{} 63:{} 232:{} 61:{} 184:{}] Pod name: virt-launcher-testvmi9ttsr-tw86j Pod phase: Pending • Failure [360.442 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should start a VirtualMachineInstance once [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:436 Timed out after 360.000s. Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:453 ------------------------------ STEP: getting an VMI STEP: Invoking virtctl start STEP: Getting the status of the VMI Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running 2018/08/04 11:53:41 http: TLS handshake error from 10.128.0.1:58994: EOF level=info timestamp=2018-08-04T11:53:41.869465Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:53:46.298656Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:53:51 http: TLS handshake error from 10.128.0.1:59040: EOF level=info timestamp=2018-08-04T11:53:51.917435Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:53:58.038623Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:54:01 http: TLS handshake error from 10.128.0.1:59090: EOF level=info timestamp=2018-08-04T11:54:01.957913Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:54:07.054720Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:54:11 http: TLS handshake error from 10.128.0.1:59136: EOF level=info timestamp=2018-08-04T11:54:12.007442Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET 
url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:54:16.359651Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:54:21 http: TLS handshake error from 10.128.0.1:59182: EOF level=info timestamp=2018-08-04T11:54:22.051048Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:54:28.093083Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 11:52:27 http: TLS handshake error from 10.129.0.1:50324: EOF 2018/08/04 11:52:37 http: TLS handshake error from 10.129.0.1:50334: EOF 2018/08/04 11:52:47 http: TLS handshake error from 10.129.0.1:50344: EOF 2018/08/04 11:52:57 http: TLS handshake error from 10.129.0.1:50354: EOF 2018/08/04 11:53:07 http: TLS handshake error from 10.129.0.1:50364: EOF 2018/08/04 11:53:17 http: TLS handshake error from 10.129.0.1:50374: EOF 2018/08/04 11:53:27 http: TLS handshake error from 10.129.0.1:50384: EOF 2018/08/04 11:53:37 http: TLS handshake error from 10.129.0.1:50394: EOF 2018/08/04 11:53:47 http: TLS handshake error from 10.129.0.1:50404: EOF level=info timestamp=2018-08-04T11:53:55.540304Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:53:57 http: TLS handshake error from 10.129.0.1:50414: EOF 2018/08/04 11:54:07 http: TLS handshake error from 10.129.0.1:50424: EOF 2018/08/04 11:54:17 http: TLS handshake error from 10.129.0.1:50434: EOF level=info timestamp=2018-08-04T11:54:25.543879Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:54:27 http: TLS handshake error from 10.129.0.1:50444: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:26:26.811653Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:26:26.811815Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdkcm kind= uid=3a13d365-97d9-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:31:28.254886Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmig2fcb kind= uid=edc22dec-97d9-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:31:28.255016Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmig2fcb kind= uid=edc22dec-97d9-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" 
level=info timestamp=2018-08-04T11:31:28.312986Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.519331Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmig2fcb, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: edc22dec-97d9-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.525932Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmig2fcb, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: edc22dec-97d9-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.695130Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiflk66 kind= uid=c498bc50-97da-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:37:28.695266Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiflk66 kind= uid=c498bc50-97da-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:37:28.746038Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiflk66\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiflk66" level=info timestamp=2018-08-04T11:42:29.986869Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiflk66\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiflk66, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: c498bc50-97da-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiflk66" level=info timestamp=2018-08-04T11:42:30.142768Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9ttsr kind= uid=784644a3-97db-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:42:30.142874Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9ttsr kind= uid=784644a3-97db-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:48:30.619004Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifk4c9 kind= 
uid=4f1fb50a-97dc-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:48:30.619152Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifk4c9 kind= uid=4f1fb50a-97dc-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[60:{} 61:{} 236:{} 183:{} 231:{} 227:{} 59:{} 184:{} 228:{} 235:{} 232:{} 144:{} 63:{} 62:{}] level=error timestamp=2018-08-04T11:53:39.041694Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[232:{} 236:{} 59:{} 227:{} 184:{} 61:{} 62:{} 183:{} 144:{} 228:{} 231:{} 60:{} 235:{} 63:{}] level=error timestamp=2018-08-04T11:53:42.065843Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmifk4c9-hxf8f Pod phase: Pending • Failure [360.455 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should stop a VirtualMachineInstance once [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:467 Timed out after 360.000s. 
Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:480 ------------------------------ STEP: getting an VMI STEP: Invoking virtctl stop STEP: Ensuring VMI is running Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:56:46.631866Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:56:51 http: TLS handshake error from 10.128.0.1:59894: EOF level=info timestamp=2018-08-04T11:56:52.775422Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:56:58.413768Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:57:01 http: TLS handshake error from 10.128.0.1:59944: EOF level=info timestamp=2018-08-04T11:57:02.825211Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:57:07.556239Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:57:11 http: TLS handshake error from 10.128.0.1:59990: EOF level=info timestamp=2018-08-04T11:57:12.874819Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:57:16.695376Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:57:21 http: TLS handshake error from 10.128.0.1:60036: EOF level=info timestamp=2018-08-04T11:57:22.924890Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:57:26.042560Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T11:57:28.481212Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:57:31 http: TLS handshake error from 10.128.0.1:60086: EOF Pod name: virt-api-7d79764579-lbcmq Pod phase: Running level=info timestamp=2018-08-04T11:55:25.514653Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:55:27 http: TLS handshake error from 10.129.0.1:50506: EOF 2018/08/04 11:55:37 http: TLS handshake error from 10.129.0.1:50516: EOF 2018/08/04 11:55:47 http: TLS 
handshake error from 10.129.0.1:50526: EOF 2018/08/04 11:55:57 http: TLS handshake error from 10.129.0.1:50536: EOF 2018/08/04 11:56:07 http: TLS handshake error from 10.129.0.1:50546: EOF 2018/08/04 11:56:17 http: TLS handshake error from 10.129.0.1:50556: EOF 2018/08/04 11:56:27 http: TLS handshake error from 10.129.0.1:50566: EOF 2018/08/04 11:56:37 http: TLS handshake error from 10.129.0.1:50576: EOF 2018/08/04 11:56:47 http: TLS handshake error from 10.129.0.1:50586: EOF level=info timestamp=2018-08-04T11:56:55.532061Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:56:57 http: TLS handshake error from 10.129.0.1:50596: EOF 2018/08/04 11:57:07 http: TLS handshake error from 10.129.0.1:50606: EOF 2018/08/04 11:57:17 http: TLS handshake error from 10.129.0.1:50616: EOF 2018/08/04 11:57:27 http: TLS handshake error from 10.129.0.1:50626: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:37:28.525932Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmig2fcb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmig2fcb, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: edc22dec-97d9-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmig2fcb" level=info timestamp=2018-08-04T11:37:28.695130Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiflk66 kind= uid=c498bc50-97da-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:37:28.695266Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiflk66 kind= uid=c498bc50-97da-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:37:28.746038Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiflk66\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiflk66" level=info timestamp=2018-08-04T11:42:29.986869Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiflk66\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiflk66, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: c498bc50-97da-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiflk66" level=info timestamp=2018-08-04T11:42:30.142768Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9ttsr kind= uid=784644a3-97db-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:42:30.142874Z pos=preset.go:171 component=virt-controller service=http 
namespace=kubevirt-test-default name=testvmi9ttsr kind= uid=784644a3-97db-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:48:30.619004Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifk4c9 kind= uid=4f1fb50a-97dc-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:48:30.619152Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifk4c9 kind= uid=4f1fb50a-97dc-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:54:30.875881Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifk4c9\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifk4c9, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4f1fb50a-97dc-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmifk4c9" level=info timestamp=2018-08-04T11:54:31.016249Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqszvl kind= uid=25f31692-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:54:31.016398Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqszvl kind= uid=25f31692-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:54:31.079604Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:54:31.089962Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:54:31.104690Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[236:{} 59:{} 60:{} 228:{} 232:{} 184:{} 227:{} 231:{} 62:{} 183:{} 235:{} 63:{} 61:{} 144:{}] level=error timestamp=2018-08-04T11:57:04.864355Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" 
msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[184:{} 62:{} 183:{} 235:{} 59:{} 227:{} 231:{} 232:{} 236:{} 60:{} 61:{} 144:{} 228:{} 63:{}] level=error timestamp=2018-08-04T11:57:07.882296Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmiqszvl-g594h Pod phase: Pending • Failure [180.439 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.004s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1034 ------------------------------ STEP: Starting a VirtualMachineInstance STEP: Waiting until the VirtualMachineInstance will start level=info timestamp=2018-08-04T11:54:31.753314Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmiqszvl kind=VirtualMachineInstance uid=25f31692-97dd-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmiqszvl-g594h" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T11:59:46.935958Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 11:59:51 http: TLS handshake error from 10.128.0.1:60748: EOF level=info timestamp=2018-08-04T11:59:53.605209Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T11:59:56.053931Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T11:59:58.731736Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:00:01 http: TLS handshake error from 10.128.0.1:60798: EOF level=info timestamp=2018-08-04T12:00:03.661165Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:00:07.930765Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:00:11 http: TLS handshake error from 10.128.0.1:60844: 
EOF level=info timestamp=2018-08-04T12:00:13.713494Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:00:16.991041Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:00:21 http: TLS handshake error from 10.128.0.1:60890: EOF level=info timestamp=2018-08-04T12:00:23.756883Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:00:28.782654Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:00:31 http: TLS handshake error from 10.128.0.1:60940: EOF Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 11:58:27 http: TLS handshake error from 10.129.0.1:50688: EOF 2018/08/04 11:58:37 http: TLS handshake error from 10.129.0.1:50698: EOF 2018/08/04 11:58:47 http: TLS handshake error from 10.129.0.1:50708: EOF 2018/08/04 11:58:57 http: TLS handshake error from 10.129.0.1:50718: EOF 2018/08/04 11:59:07 http: TLS handshake error from 10.129.0.1:50728: EOF 2018/08/04 11:59:17 http: TLS handshake error from 10.129.0.1:50738: EOF level=info timestamp=2018-08-04T11:59:25.555948Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 11:59:27 http: TLS handshake error from 10.129.0.1:50748: EOF 2018/08/04 11:59:37 http: TLS handshake error from 10.129.0.1:50758: EOF 2018/08/04 11:59:47 http: TLS handshake error from 10.129.0.1:50768: EOF 2018/08/04 11:59:57 http: TLS handshake error from 10.129.0.1:50778: EOF 2018/08/04 12:00:07 http: TLS handshake error from 10.129.0.1:50788: EOF 2018/08/04 12:00:17 http: TLS handshake error from 10.129.0.1:50798: EOF level=info timestamp=2018-08-04T12:00:25.530991Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:00:27 http: TLS handshake error from 10.129.0.1:50808: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:42:30.142768Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9ttsr kind= uid=784644a3-97db-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:42:30.142874Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9ttsr kind= uid=784644a3-97db-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:48:30.619004Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifk4c9 kind= uid=4f1fb50a-97dc-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:48:30.619152Z 
pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifk4c9 kind= uid=4f1fb50a-97dc-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:54:30.875881Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifk4c9\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifk4c9, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4f1fb50a-97dc-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmifk4c9" level=info timestamp=2018-08-04T11:54:31.016249Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqszvl kind= uid=25f31692-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:54:31.016398Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqszvl kind= uid=25f31692-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:54:31.079604Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:54:31.089962Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:54:31.104690Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:57:31.455036Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirfx8g kind= uid=917ffc2b-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:57:31.455209Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirfx8g kind= uid=917ffc2b-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:57:31.508378Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T11:57:31.518982Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance 
kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T11:57:31.527361Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" Pod name: virt-handler-n99ls Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[183:{} 235:{} 236:{} 61:{} 62:{} 59:{} 184:{} 231:{} 232:{} 144:{} 228:{} 60:{} 227:{} 63:{}] Pod name: virt-handler-tt462 Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[59:{} 227:{} 183:{} 144:{} 61:{} 62:{} 228:{} 235:{} 63:{} 232:{} 236:{} 231:{} 60:{} 184:{}] Pod name: virt-launcher-testvmirfx8g-8pffn Pod phase: Pending • Failure [180.438 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.004s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1034 ------------------------------ STEP: Starting a VirtualMachineInstance STEP: Waiting until the VirtualMachineInstance will start level=info timestamp=2018-08-04T11:57:32.149930Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmirfx8g kind=VirtualMachineInstance uid=917ffc2b-97dd-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmirfx8g-8pffn" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T12:02:47.273703Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:02:51 http: TLS handshake error from 10.128.0.1:33474: EOF level=info timestamp=2018-08-04T12:02:54.465864Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:02:56.019352Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:02:59.093829Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:03:01 http: TLS handshake error from 10.128.0.1:33524: EOF level=info timestamp=2018-08-04T12:03:04.506950Z 
pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:03:08.337761Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:03:11 http: TLS handshake error from 10.128.0.1:33570: EOF level=info timestamp=2018-08-04T12:03:14.546338Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:03:17.329638Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:03:21 http: TLS handshake error from 10.128.0.1:33616: EOF level=info timestamp=2018-08-04T12:03:24.586316Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:03:29.147872Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:03:31 http: TLS handshake error from 10.128.0.1:33666: EOF Pod name: virt-api-7d79764579-lbcmq Pod phase: Running level=info timestamp=2018-08-04T12:01:25.896364Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:01:27 http: TLS handshake error from 10.129.0.1:50870: EOF 2018/08/04 12:01:37 http: TLS handshake error from 10.129.0.1:50880: EOF 2018/08/04 12:01:47 http: TLS handshake error from 10.129.0.1:50890: EOF 2018/08/04 12:01:57 http: TLS handshake error from 10.129.0.1:50900: EOF 2018/08/04 12:02:07 http: TLS handshake error from 10.129.0.1:50910: EOF 2018/08/04 12:02:17 http: TLS handshake error from 10.129.0.1:50920: EOF 2018/08/04 12:02:27 http: TLS handshake error from 10.129.0.1:50930: EOF 2018/08/04 12:02:37 http: TLS handshake error from 10.129.0.1:50940: EOF 2018/08/04 12:02:47 http: TLS handshake error from 10.129.0.1:50950: EOF 2018/08/04 12:02:57 http: TLS handshake error from 10.129.0.1:50960: EOF 2018/08/04 12:03:07 http: TLS handshake error from 10.129.0.1:50970: EOF 2018/08/04 12:03:17 http: TLS handshake error from 10.129.0.1:50980: EOF level=info timestamp=2018-08-04T12:03:25.560583Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:03:27 http: TLS handshake error from 10.129.0.1:50990: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:48:30.619152Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifk4c9 kind= uid=4f1fb50a-97dc-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info 
timestamp=2018-08-04T11:54:30.875881Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmifk4c9\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmifk4c9, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4f1fb50a-97dc-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmifk4c9" level=info timestamp=2018-08-04T11:54:31.016249Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqszvl kind= uid=25f31692-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:54:31.016398Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqszvl kind= uid=25f31692-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:54:31.079604Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:54:31.089962Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:54:31.104690Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:57:31.455036Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirfx8g kind= uid=917ffc2b-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:57:31.455209Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirfx8g kind= uid=917ffc2b-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:57:31.508378Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T11:57:31.518982Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T11:57:31.527361Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io 
\"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T12:00:31.894921Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitddrb kind= uid=fd0cb39f-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:00:31.895100Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitddrb kind= uid=fd0cb39f-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:00:31.962058Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitddrb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitddrb" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[236:{} 63:{} 232:{} 59:{} 184:{} 62:{} 183:{} 235:{} 228:{} 231:{} 60:{} 61:{} 227:{} 144:{}] level=error timestamp=2018-08-04T12:02:59.071287Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[60:{} 227:{} 144:{} 63:{} 235:{} 236:{} 59:{} 61:{} 62:{} 228:{} 184:{} 232:{} 183:{} 231:{}] level=error timestamp=2018-08-04T12:03:02.049748Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmitddrb-xf4b5 Pod phase: Pending • Failure [180.441 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.003s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1034 ------------------------------ STEP: Starting and stopping the VirtualMachineInstance number of times STEP: Starting a VirtualMachineInstance STEP: Waiting until the VirtualMachineInstance will start level=info timestamp=2018-08-04T12:00:32.563913Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmitddrb kind=VirtualMachineInstance uid=fd0cb39f-97dd-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmitddrb-xf4b5" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T12:05:45.324061Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:05:47.614245Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:05:51 http: TLS handshake error from 10.128.0.1:34336: EOF level=info timestamp=2018-08-04T12:05:55.377276Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:05:59.446703Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:06:01 http: TLS handshake error from 10.128.0.1:34386: EOF level=info timestamp=2018-08-04T12:06:05.435146Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:06:08.682050Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:06:11 http: TLS handshake error from 10.128.0.1:34432: EOF level=info timestamp=2018-08-04T12:06:15.476102Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:06:17.674578Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:06:21 http: TLS handshake error from 10.128.0.1:34478: EOF level=info timestamp=2018-08-04T12:06:25.520080Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:06:29.675773Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:06:31 http: TLS 
handshake error from 10.128.0.1:34528: EOF Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:04:37 http: TLS handshake error from 10.129.0.1:51062: EOF 2018/08/04 12:04:47 http: TLS handshake error from 10.129.0.1:51072: EOF level=info timestamp=2018-08-04T12:04:55.540128Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:04:57 http: TLS handshake error from 10.129.0.1:51082: EOF 2018/08/04 12:05:07 http: TLS handshake error from 10.129.0.1:51092: EOF 2018/08/04 12:05:17 http: TLS handshake error from 10.129.0.1:51102: EOF 2018/08/04 12:05:27 http: TLS handshake error from 10.129.0.1:51112: EOF 2018/08/04 12:05:37 http: TLS handshake error from 10.129.0.1:51122: EOF 2018/08/04 12:05:47 http: TLS handshake error from 10.129.0.1:51132: EOF level=info timestamp=2018-08-04T12:05:55.512556Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:05:57 http: TLS handshake error from 10.129.0.1:51142: EOF 2018/08/04 12:06:07 http: TLS handshake error from 10.129.0.1:51152: EOF 2018/08/04 12:06:17 http: TLS handshake error from 10.129.0.1:51162: EOF level=info timestamp=2018-08-04T12:06:25.550175Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:06:27 http: TLS handshake error from 10.129.0.1:51172: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:54:31.016398Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqszvl kind= uid=25f31692-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:54:31.079604Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:54:31.089962Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:54:31.104690Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqszvl\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqszvl" level=info timestamp=2018-08-04T11:57:31.455036Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirfx8g kind= uid=917ffc2b-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:57:31.455209Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default 
name=testvmirfx8g kind= uid=917ffc2b-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:57:31.508378Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T11:57:31.518982Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T11:57:31.527361Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T12:00:31.894921Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitddrb kind= uid=fd0cb39f-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:00:31.895100Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitddrb kind= uid=fd0cb39f-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:00:31.962058Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitddrb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitddrb" level=info timestamp=2018-08-04T12:03:32.170518Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitddrb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmitddrb, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: fd0cb39f-97dd-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitddrb" level=info timestamp=2018-08-04T12:03:32.333423Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitrbxd kind= uid=6899b1ad-97de-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:03:32.333591Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitrbxd kind= uid=6899b1ad-97de-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[235:{} 236:{} 60:{} 184:{} 183:{} 232:{} 61:{} 144:{} 59:{} 227:{} 231:{} 63:{} 62:{} 228:{}] Pod name: virt-handler-tt462 Pod phase: Running 59 network_throughput 60 
network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[236:{} 60:{} 61:{} 62:{} 232:{} 184:{} 227:{} 183:{} 59:{} 228:{} 144:{} 231:{} 63:{} 235:{}] Pod name: virt-launcher-testvmitrbxd-q76c4 Pod phase: Pending • Failure [180.437 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.004s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1034 ------------------------------ STEP: Starting and stopping the VirtualMachineInstance number of times STEP: Starting a VirtualMachineInstance STEP: Waiting until the VirtualMachineInstance will start level=info timestamp=2018-08-04T12:03:33.072113Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmitrbxd kind=VirtualMachineInstance uid=6899b1ad-97de-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmitrbxd-q76c4" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T12:08:46.197170Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:08:47.967935Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:08:51 http: TLS handshake error from 10.128.0.1:35288: EOF level=info timestamp=2018-08-04T12:08:56.266730Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:08:59.958857Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:09:01 http: TLS handshake error from 10.128.0.1:35338: EOF level=info timestamp=2018-08-04T12:09:06.327623Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:09:09.037520Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:09:11 http: TLS handshake error from 10.128.0.1:35384: EOF level=info timestamp=2018-08-04T12:09:16.366719Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET 
url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:09:18.026985Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:09:21 http: TLS handshake error from 10.128.0.1:35430: EOF level=info timestamp=2018-08-04T12:09:26.408955Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:09:30.012880Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:09:31 http: TLS handshake error from 10.128.0.1:35480: EOF Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:07:27 http: TLS handshake error from 10.129.0.1:51234: EOF 2018/08/04 12:07:37 http: TLS handshake error from 10.129.0.1:51244: EOF 2018/08/04 12:07:47 http: TLS handshake error from 10.129.0.1:51254: EOF 2018/08/04 12:07:57 http: TLS handshake error from 10.129.0.1:51264: EOF 2018/08/04 12:08:07 http: TLS handshake error from 10.129.0.1:51274: EOF 2018/08/04 12:08:17 http: TLS handshake error from 10.129.0.1:51284: EOF 2018/08/04 12:08:27 http: TLS handshake error from 10.129.0.1:51294: EOF 2018/08/04 12:08:37 http: TLS handshake error from 10.129.0.1:51304: EOF 2018/08/04 12:08:47 http: TLS handshake error from 10.129.0.1:51314: EOF level=info timestamp=2018-08-04T12:08:55.532399Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:08:57 http: TLS handshake error from 10.129.0.1:51324: EOF 2018/08/04 12:09:07 http: TLS handshake error from 10.129.0.1:51334: EOF 2018/08/04 12:09:17 http: TLS handshake error from 10.129.0.1:51344: EOF level=info timestamp=2018-08-04T12:09:25.533668Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:09:27 http: TLS handshake error from 10.129.0.1:51354: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:57:31.455036Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirfx8g kind= uid=917ffc2b-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T11:57:31.455209Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirfx8g kind= uid=917ffc2b-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T11:57:31.508378Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T11:57:31.518982Z pos=vmi.go:157 
component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T11:57:31.527361Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T12:00:31.894921Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitddrb kind= uid=fd0cb39f-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:00:31.895100Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitddrb kind= uid=fd0cb39f-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:00:31.962058Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitddrb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitddrb" level=info timestamp=2018-08-04T12:03:32.170518Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitddrb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmitddrb, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: fd0cb39f-97dd-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitddrb" level=info timestamp=2018-08-04T12:03:32.333423Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitrbxd kind= uid=6899b1ad-97de-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:03:32.333591Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitrbxd kind= uid=6899b1ad-97de-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:06:32.610468Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitrbxd\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmitrbxd, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 6899b1ad-97de-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitrbxd" level=info timestamp=2018-08-04T12:06:32.771917Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiwg2j8 kind= uid=d4266c6e-97de-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:06:32.772062Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiwg2j8 kind= 
uid=d4266c6e-97de-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:06:32.837062Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiwg2j8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiwg2j8" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[236:{} 61:{} 235:{} 63:{} 232:{} 60:{} 62:{} 144:{} 59:{} 184:{} 227:{} 183:{} 228:{} 231:{}] level=error timestamp=2018-08-04T12:08:42.785817Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[227:{} 231:{} 61:{} 183:{} 63:{} 232:{} 236:{} 59:{} 60:{} 184:{} 62:{} 144:{} 228:{} 235:{}] level=error timestamp=2018-08-04T12:08:45.761811Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmiwg2j8-zsr2z Pod phase: Pending • Failure [180.442 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With an emptyDisk defined /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:113 should create a writeable emptyDisk with the right capacity [It] /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:115 Timed out after 90.004s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1034 ------------------------------ STEP: Starting a VirtualMachineInstance STEP: Waiting until the VirtualMachineInstance will start level=info timestamp=2018-08-04T12:06:33.432505Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmiwg2j8 kind=VirtualMachineInstance uid=d4266c6e-97de-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmiwg2j8-zsr2z" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T12:11:48.324496Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:11:51 http: TLS handshake error from 10.128.0.1:36150: EOF level=info timestamp=2018-08-04T12:11:57.121375Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:12:00.324498Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:12:01 http: TLS handshake error from 10.128.0.1:36200: EOF level=info timestamp=2018-08-04T12:12:07.179047Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:12:09.382329Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:12:11 http: TLS handshake error from 10.128.0.1:36246: EOF level=info timestamp=2018-08-04T12:12:17.224863Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:12:18.381033Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:12:21 http: TLS handshake error from 10.128.0.1:36292: EOF level=info timestamp=2018-08-04T12:12:26.056432Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:12:27.288783Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:12:30.385121Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:12:31 http: TLS handshake error from 10.128.0.1:36342: EOF Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:10:27 http: 
TLS handshake error from 10.129.0.1:51416: EOF 2018/08/04 12:10:37 http: TLS handshake error from 10.129.0.1:51426: EOF 2018/08/04 12:10:47 http: TLS handshake error from 10.129.0.1:51436: EOF level=info timestamp=2018-08-04T12:10:55.556310Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:10:57 http: TLS handshake error from 10.129.0.1:51446: EOF 2018/08/04 12:11:07 http: TLS handshake error from 10.129.0.1:51456: EOF 2018/08/04 12:11:17 http: TLS handshake error from 10.129.0.1:51466: EOF 2018/08/04 12:11:27 http: TLS handshake error from 10.129.0.1:51476: EOF 2018/08/04 12:11:37 http: TLS handshake error from 10.129.0.1:51486: EOF 2018/08/04 12:11:47 http: TLS handshake error from 10.129.0.1:51496: EOF level=info timestamp=2018-08-04T12:11:55.538074Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:11:57 http: TLS handshake error from 10.129.0.1:51506: EOF 2018/08/04 12:12:07 http: TLS handshake error from 10.129.0.1:51516: EOF 2018/08/04 12:12:17 http: TLS handshake error from 10.129.0.1:51526: EOF 2018/08/04 12:12:27 http: TLS handshake error from 10.129.0.1:51536: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T11:57:31.518982Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T11:57:31.527361Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirfx8g\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirfx8g" level=info timestamp=2018-08-04T12:00:31.894921Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitddrb kind= uid=fd0cb39f-97dd-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:00:31.895100Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitddrb kind= uid=fd0cb39f-97dd-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:00:31.962058Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitddrb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitddrb" level=info timestamp=2018-08-04T12:03:32.170518Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitddrb\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmitddrb, ResourceVersion: 0, AdditionalErrorMsg: 
Precondition failed: UID in precondition: fd0cb39f-97dd-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitddrb" level=info timestamp=2018-08-04T12:03:32.333423Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitrbxd kind= uid=6899b1ad-97de-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:03:32.333591Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitrbxd kind= uid=6899b1ad-97de-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:06:32.610468Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitrbxd\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmitrbxd, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 6899b1ad-97de-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitrbxd" level=info timestamp=2018-08-04T12:06:32.771917Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiwg2j8 kind= uid=d4266c6e-97de-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:06:32.772062Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiwg2j8 kind= uid=d4266c6e-97de-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:06:32.837062Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiwg2j8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiwg2j8" level=info timestamp=2018-08-04T12:09:33.055328Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiwg2j8\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiwg2j8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: d4266c6e-97de-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiwg2j8" level=info timestamp=2018-08-04T12:09:33.225271Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqpvb6 kind= uid=3fb3fb5f-97df-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:09:33.225424Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqpvb6 kind= uid=3fb3fb5f-97df-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter level=error timestamp=2018-08-04T12:12:18.243919Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" 
msg="Check for mandatory device /dev/net/tun failed" Printing discovered devices map[236:{} 228:{} 235:{} 231:{} 63:{} 60:{} 62:{} 232:{} 59:{} 184:{} 144:{} 61:{} 227:{} 183:{}] Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[235:{} 231:{} 184:{} 227:{} 228:{} 62:{} 183:{} 63:{} 232:{} 59:{} 61:{} 144:{} 236:{} 60:{}] level=error timestamp=2018-08-04T12:12:21.223751Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmiqpvb6-jgbrs Pod phase: Pending • Failure [180.454 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With an emptyDisk defined and a specified serial number /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:163 should create a writeable emptyDisk with the specified serial number [It] /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:165 Timed out after 90.004s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1034 ------------------------------ STEP: Starting a VirtualMachineInstance STEP: Waiting until the VirtualMachineInstance will start level=info timestamp=2018-08-04T12:09:33.885653Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmiqpvb6 kind=VirtualMachineInstance uid=3fb3fb5f-97df-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmiqpvb6-jgbrs" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T12:14:47.975435Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:14:48.675896Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:14:51 http: TLS handshake error from 10.128.0.1:37010: EOF level=info timestamp=2018-08-04T12:14:58.018001Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:15:00.731710Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:15:01 http: TLS handshake error from 10.128.0.1:37060: EOF level=info timestamp=2018-08-04T12:15:08.070388Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:15:09.720113Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" 
proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:15:11 http: TLS handshake error from 10.128.0.1:37106: EOF level=info timestamp=2018-08-04T12:15:18.160228Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:15:18.732303Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:15:21 http: TLS handshake error from 10.128.0.1:37152: EOF level=info timestamp=2018-08-04T12:15:28.207439Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:15:30.831437Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:15:31 http: TLS handshake error from 10.128.0.1:37202: EOF Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:13:37 http: TLS handshake error from 10.129.0.1:51608: EOF 2018/08/04 12:13:47 http: TLS handshake error from 10.129.0.1:51618: EOF 2018/08/04 12:13:57 http: TLS handshake error from 10.129.0.1:51628: EOF 2018/08/04 12:14:07 http: TLS handshake error from 10.129.0.1:51638: EOF 2018/08/04 12:14:17 http: TLS handshake error from 10.129.0.1:51648: EOF level=info timestamp=2018-08-04T12:14:25.579528Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:14:27 http: TLS handshake error from 10.129.0.1:51658: EOF 2018/08/04 12:14:37 http: TLS handshake error from 10.129.0.1:51668: EOF 2018/08/04 12:14:47 http: TLS handshake error from 10.129.0.1:51678: EOF level=info timestamp=2018-08-04T12:14:55.563511Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:14:57 http: TLS handshake error from 10.129.0.1:51688: EOF 2018/08/04 12:15:07 http: TLS handshake error from 10.129.0.1:51698: EOF 2018/08/04 12:15:17 http: TLS handshake error from 10.129.0.1:51708: EOF level=info timestamp=2018-08-04T12:15:25.506815Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:15:27 http: TLS handshake error from 10.129.0.1:51718: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T12:03:32.333423Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitrbxd kind= uid=6899b1ad-97de-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:03:32.333591Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmitrbxd kind= uid=6899b1ad-97de-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:06:32.610468Z pos=vmi.go:157 
component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmitrbxd\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmitrbxd, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 6899b1ad-97de-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmitrbxd" level=info timestamp=2018-08-04T12:06:32.771917Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiwg2j8 kind= uid=d4266c6e-97de-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:06:32.772062Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiwg2j8 kind= uid=d4266c6e-97de-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:06:32.837062Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiwg2j8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiwg2j8" level=info timestamp=2018-08-04T12:09:33.055328Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiwg2j8\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiwg2j8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: d4266c6e-97de-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiwg2j8" level=info timestamp=2018-08-04T12:09:33.225271Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqpvb6 kind= uid=3fb3fb5f-97df-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:09:33.225424Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqpvb6 kind= uid=3fb3fb5f-97df-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:12:33.517590Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqpvb6\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqpvb6, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 3fb3fb5f-97df-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqpvb6" level=info timestamp=2018-08-04T12:12:33.669102Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmisqcqq kind= uid=ab42dc3e-97df-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:12:33.669283Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmisqcqq kind= uid=ab42dc3e-97df-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:12:33.791982Z pos=vmi.go:157 component=virt-controller service=http 
reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:12:33.808093Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:12:33.825678Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[227:{} 144:{} 236:{} 61:{} 62:{} 231:{} 63:{} 59:{} 60:{} 184:{} 235:{} 232:{} 183:{} 228:{}] level=error timestamp=2018-08-04T12:14:07.414688Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[59:{} 184:{} 144:{} 235:{} 62:{} 183:{} 63:{} 231:{} 232:{} 236:{} 60:{} 61:{} 227:{} 228:{}] Pod name: virt-launcher-testvmisqcqq-mn8cd Pod phase: Pending • Failure [180.453 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should be successfully started [It] /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:207 Timed out after 90.007s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1034 ------------------------------ STEP: Starting a VirtualMachineInstance STEP: Waiting until the VirtualMachineInstance will start level=info timestamp=2018-08-04T12:12:34.459978Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmisqcqq kind=VirtualMachineInstance uid=ab42dc3e-97df-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmisqcqq-mn8cd" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running 2018/08/04 12:17:51 http: TLS handshake error from 10.128.0.1:37960: EOF level=info timestamp=2018-08-04T12:17:56.041552Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:17:58.943978Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:18:01.184099Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:18:01 http: TLS handshake error from 10.128.0.1:38010: EOF level=info timestamp=2018-08-04T12:18:08.991718Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:18:10.117318Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:18:11 http: TLS handshake error from 10.128.0.1:38056: EOF level=info timestamp=2018-08-04T12:18:19.058517Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:18:19.088908Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:18:21 http: TLS handshake error from 10.128.0.1:38102: EOF level=info timestamp=2018-08-04T12:18:26.044899Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:18:29.112072Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:18:31.236613Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:18:31 http: TLS handshake error from 10.128.0.1:38152: EOF Pod name: virt-api-7d79764579-lbcmq Pod phase: Running level=info timestamp=2018-08-04T12:16:25.511426Z pos=filter.go:46 
component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:16:27 http: TLS handshake error from 10.129.0.1:51780: EOF 2018/08/04 12:16:37 http: TLS handshake error from 10.129.0.1:51790: EOF 2018/08/04 12:16:47 http: TLS handshake error from 10.129.0.1:51800: EOF 2018/08/04 12:16:57 http: TLS handshake error from 10.129.0.1:51810: EOF 2018/08/04 12:17:07 http: TLS handshake error from 10.129.0.1:51820: EOF 2018/08/04 12:17:17 http: TLS handshake error from 10.129.0.1:51830: EOF level=info timestamp=2018-08-04T12:17:25.511766Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:17:27 http: TLS handshake error from 10.129.0.1:51840: EOF 2018/08/04 12:17:37 http: TLS handshake error from 10.129.0.1:51850: EOF 2018/08/04 12:17:47 http: TLS handshake error from 10.129.0.1:51860: EOF 2018/08/04 12:17:57 http: TLS handshake error from 10.129.0.1:51870: EOF 2018/08/04 12:18:07 http: TLS handshake error from 10.129.0.1:51880: EOF 2018/08/04 12:18:17 http: TLS handshake error from 10.129.0.1:51890: EOF 2018/08/04 12:18:27 http: TLS handshake error from 10.129.0.1:51900: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T12:06:32.772062Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiwg2j8 kind= uid=d4266c6e-97de-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:06:32.837062Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiwg2j8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiwg2j8" level=info timestamp=2018-08-04T12:09:33.055328Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiwg2j8\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiwg2j8, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: d4266c6e-97de-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiwg2j8" level=info timestamp=2018-08-04T12:09:33.225271Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqpvb6 kind= uid=3fb3fb5f-97df-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:09:33.225424Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqpvb6 kind= uid=3fb3fb5f-97df-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:12:33.517590Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqpvb6\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqpvb6, ResourceVersion: 0, 
AdditionalErrorMsg: Precondition failed: UID in precondition: 3fb3fb5f-97df-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqpvb6" level=info timestamp=2018-08-04T12:12:33.669102Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmisqcqq kind= uid=ab42dc3e-97df-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:12:33.669283Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmisqcqq kind= uid=ab42dc3e-97df-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:12:33.791982Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:12:33.808093Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:12:33.825678Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:15:33.959401Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmisqcqq, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: ab42dc3e-97df-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:15:34.122103Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirhm2k kind= uid=16d1f9c9-97e0-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:15:34.122299Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirhm2k kind= uid=16d1f9c9-97e0-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:15:34.202253Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirhm2k\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirhm2k" Pod name: virt-handler-n99ls Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[227:{} 228:{} 232:{} 59:{} 60:{} 144:{} 235:{} 61:{} 184:{} 183:{} 236:{} 62:{} 231:{} 63:{}] 
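
Both virt-handler pods (virt-handler-n99ls and virt-handler-tt462) keep logging "tun device does not show up in /proc/misc, is the module loaded?" while every virt-launcher pod in this run stays in the Pending phase, so the tun kernel module is a plausible thing to verify on the nodes. A minimal diagnostic sketch, assuming root access on each node; these commands are illustrative and were not run as part of this log:

# Verify that the tun module is loaded and that the device node virt-handler requires exists.
lsmod | grep -w tun        # module should be listed when loaded
grep -w tun /proc/misc     # the file virt-handler's health check reads, per the error above
ls -l /dev/net/tun         # character device (major 10, minor 200) when present
modprobe tun               # load the module if it is missing
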
Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[227:{} 144:{} 63:{} 232:{} 236:{} 231:{} 62:{} 183:{} 61:{} 184:{} 228:{} 235:{} 59:{} 60:{}] level=error timestamp=2018-08-04T12:17:52.667870Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmirhm2k-jl2sc Pod phase: Pending • Failure [180.442 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should not persist data [It] /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:218 Timed out after 90.003s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1034 ------------------------------ STEP: Starting the VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Waiting until the VirtualMachineInstance will start level=info timestamp=2018-08-04T12:15:34.832516Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmirhm2k kind=VirtualMachineInstance uid=16d1f9c9-97e0-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmirhm2k-jl2sc" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T12:22:10.145921Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:22:10.567993Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:22:11 http: TLS handshake error from 10.128.0.1:39204: EOF level=info timestamp=2018-08-04T12:22:14.175086Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:22:14.200850Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:22:14.249385Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:22:19.584624Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:22:20.191543Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 
2018/08/04 12:22:21 http: TLS handshake error from 10.128.0.1:39252: EOF level=info timestamp=2018-08-04T12:22:25.796605Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:22:25.808971Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:22:25.819772Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:22:30.232669Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:22:31 http: TLS handshake error from 10.128.0.1:39302: EOF level=info timestamp=2018-08-04T12:22:32.036697Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:20:57 http: TLS handshake error from 10.129.0.1:52052: EOF 2018/08/04 12:21:07 http: TLS handshake error from 10.129.0.1:52062: EOF 2018/08/04 12:21:17 http: TLS handshake error from 10.129.0.1:52072: EOF level=info timestamp=2018-08-04T12:21:25.009723Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:21:25.571036Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:21:25.892610Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:21:27 http: TLS handshake error from 10.129.0.1:52082: EOF 2018/08/04 12:21:37 http: TLS handshake error from 10.129.0.1:52092: EOF 2018/08/04 12:21:47 http: TLS handshake error from 10.129.0.1:52102: EOF level=info timestamp=2018-08-04T12:21:55.566376Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:21:57 http: TLS handshake error from 10.129.0.1:52112: EOF 2018/08/04 12:22:07 http: TLS handshake error from 10.129.0.1:52122: EOF 2018/08/04 12:22:17 http: TLS handshake error from 10.129.0.1:52132: EOF level=info timestamp=2018-08-04T12:22:25.522003Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:22:27 http: TLS handshake error from 10.129.0.1:52144: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T12:09:33.225271Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqpvb6 kind= uid=3fb3fb5f-97df-11e8-96b4-525500d15501 msg="Initializing 
VirtualMachineInstance" level=info timestamp=2018-08-04T12:09:33.225424Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqpvb6 kind= uid=3fb3fb5f-97df-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:12:33.517590Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqpvb6\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqpvb6, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 3fb3fb5f-97df-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqpvb6" level=info timestamp=2018-08-04T12:12:33.669102Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmisqcqq kind= uid=ab42dc3e-97df-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:12:33.669283Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmisqcqq kind= uid=ab42dc3e-97df-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:12:33.791982Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:12:33.808093Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:12:33.825678Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:15:33.959401Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmisqcqq, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: ab42dc3e-97df-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:15:34.122103Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirhm2k kind= uid=16d1f9c9-97e0-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:15:34.122299Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirhm2k kind= uid=16d1f9c9-97e0-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:15:34.202253Z pos=vmi.go:157 
component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirhm2k\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirhm2k" level=info timestamp=2018-08-04T12:18:34.398007Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirhm2k\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmirhm2k, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 16d1f9c9-97e0-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirhm2k" level=info timestamp=2018-08-04T12:18:34.589474Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqk7sp kind= uid=8262a5c0-97e0-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:18:34.589643Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqk7sp kind= uid=8262a5c0-97e0-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[236:{} 228:{} 235:{} 59:{} 227:{} 183:{} 63:{} 232:{} 60:{} 61:{} 184:{} 62:{} 144:{} 231:{}] Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[236:{} 63:{} 227:{} 60:{} 61:{} 184:{} 144:{} 235:{} 231:{} 183:{} 228:{} 232:{} 59:{} 62:{}] level=error timestamp=2018-08-04T12:21:51.191823Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmiqk7sp-lvw8g Pod phase: Pending • Failure [240.498 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With VirtualMachineInstance with two PVCs /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:266 should start vmi multiple times [It] /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:278 Timed out after 120.004s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1034 ------------------------------ STEP: Starting and stopping the VirtualMachineInstance number of times STEP: Starting a VirtualMachineInstance STEP: Waiting until the VirtualMachineInstance will start level=info timestamp=2018-08-04T12:18:35.266733Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmiqk7sp kind=VirtualMachineInstance uid=8262a5c0-97e0-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmiqk7sp-lvw8g" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T12:25:11.151991Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:25:11 http: TLS handshake error from 10.128.0.1:40064: EOF level=info timestamp=2018-08-04T12:25:14.827496Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:25:14.845141Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:25:14.858613Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:25:19.916528Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:25:21.188649Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:25:21 http: TLS handshake error from 10.128.0.1:40112: EOF level=info timestamp=2018-08-04T12:25:26.070726Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:25:26.447121Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:25:26.464537Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:25:26.479148Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:25:31.245321Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 
contentLength=136 2018/08/04 12:25:31 http: TLS handshake error from 10.128.0.1:40162: EOF level=info timestamp=2018-08-04T12:25:32.406918Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:23:27 http: TLS handshake error from 10.129.0.1:52204: EOF 2018/08/04 12:23:37 http: TLS handshake error from 10.129.0.1:52214: EOF 2018/08/04 12:23:47 http: TLS handshake error from 10.129.0.1:52224: EOF level=info timestamp=2018-08-04T12:23:55.523400Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:23:57 http: TLS handshake error from 10.129.0.1:52234: EOF 2018/08/04 12:24:07 http: TLS handshake error from 10.129.0.1:52244: EOF 2018/08/04 12:24:17 http: TLS handshake error from 10.129.0.1:52254: EOF 2018/08/04 12:24:27 http: TLS handshake error from 10.129.0.1:52264: EOF 2018/08/04 12:24:37 http: TLS handshake error from 10.129.0.1:52274: EOF 2018/08/04 12:24:47 http: TLS handshake error from 10.129.0.1:52284: EOF level=info timestamp=2018-08-04T12:24:55.535936Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:24:57 http: TLS handshake error from 10.129.0.1:52294: EOF 2018/08/04 12:25:07 http: TLS handshake error from 10.129.0.1:52304: EOF 2018/08/04 12:25:17 http: TLS handshake error from 10.129.0.1:52314: EOF 2018/08/04 12:25:27 http: TLS handshake error from 10.129.0.1:52326: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T12:12:33.791982Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:12:33.808093Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:12:33.825678Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:15:33.959401Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisqcqq\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmisqcqq, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: ab42dc3e-97df-11e8-96b4-525500d15501, UID in object meta: " 
msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmisqcqq" level=info timestamp=2018-08-04T12:15:34.122103Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirhm2k kind= uid=16d1f9c9-97e0-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:15:34.122299Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirhm2k kind= uid=16d1f9c9-97e0-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:15:34.202253Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirhm2k\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirhm2k" level=info timestamp=2018-08-04T12:18:34.398007Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirhm2k\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmirhm2k, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 16d1f9c9-97e0-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirhm2k" level=info timestamp=2018-08-04T12:18:34.589474Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqk7sp kind= uid=8262a5c0-97e0-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:18:34.589643Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqk7sp kind= uid=8262a5c0-97e0-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:22:34.936054Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqk7sp\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqk7sp, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8262a5c0-97e0-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqk7sp" level=info timestamp=2018-08-04T12:22:35.065816Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9kktr kind= uid=11b8ee70-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:22:35.065946Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9kktr kind= uid=11b8ee70-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:22:35.120152Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kktr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kktr" level=info timestamp=2018-08-04T12:22:35.134174Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on 
virtualmachineinstances.kubevirt.io \"testvmi9kktr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kktr" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[228:{} 231:{} 61:{} 144:{} 184:{} 62:{} 183:{} 63:{} 232:{} 227:{} 60:{} 235:{} 236:{} 59:{}] level=error timestamp=2018-08-04T12:25:26.236307Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[183:{} 228:{} 232:{} 62:{} 227:{} 235:{} 231:{} 59:{} 60:{} 63:{} 61:{} 144:{} 236:{} 184:{}] level=error timestamp=2018-08-04T12:25:29.165617Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmi9kktr-cq58n Pod phase: Pending • Failure [180.443 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with 3 CPU cores /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:56 should report 3 cpu cores under guest OS [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:62 Timed out after 90.004s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1047 ------------------------------ STEP: Starting a VirtualMachineInstance level=info timestamp=2018-08-04T12:22:35.726773Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmi9kktr kind=VirtualMachineInstance uid=11b8ee70-97e1-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmi9kktr-cq58n" • Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running 2018/08/04 12:28:11 http: TLS handshake error from 10.128.0.1:41016: EOF level=info timestamp=2018-08-04T12:28:11.470246Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:28:12.029122Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:28:15.474722Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:28:15.497154Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:28:15.507493Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:28:20.256510Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:28:21 http: TLS handshake error from 10.128.0.1:41064: EOF level=info timestamp=2018-08-04T12:28:22.078240Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:28:27.024211Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:28:27.038455Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:28:27.048517Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:28:31 http: TLS handshake error from 10.128.0.1:41114: EOF level=info timestamp=2018-08-04T12:28:32.142563Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 
contentLength=136 level=info timestamp=2018-08-04T12:28:32.752738Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:26:27 http: TLS handshake error from 10.129.0.1:52386: EOF 2018/08/04 12:26:37 http: TLS handshake error from 10.129.0.1:52396: EOF 2018/08/04 12:26:47 http: TLS handshake error from 10.129.0.1:52406: EOF 2018/08/04 12:26:57 http: TLS handshake error from 10.129.0.1:52416: EOF 2018/08/04 12:27:07 http: TLS handshake error from 10.129.0.1:52426: EOF 2018/08/04 12:27:17 http: TLS handshake error from 10.129.0.1:52436: EOF level=info timestamp=2018-08-04T12:27:25.554392Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:27:27 http: TLS handshake error from 10.129.0.1:52446: EOF 2018/08/04 12:27:37 http: TLS handshake error from 10.129.0.1:52456: EOF 2018/08/04 12:27:47 http: TLS handshake error from 10.129.0.1:52466: EOF 2018/08/04 12:27:57 http: TLS handshake error from 10.129.0.1:52476: EOF 2018/08/04 12:28:07 http: TLS handshake error from 10.129.0.1:52486: EOF 2018/08/04 12:28:17 http: TLS handshake error from 10.129.0.1:52496: EOF level=info timestamp=2018-08-04T12:28:25.556546Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:28:27 http: TLS handshake error from 10.129.0.1:52508: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T12:15:34.202253Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirhm2k\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirhm2k" level=info timestamp=2018-08-04T12:18:34.398007Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirhm2k\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmirhm2k, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 16d1f9c9-97e0-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirhm2k" level=info timestamp=2018-08-04T12:18:34.589474Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqk7sp kind= uid=8262a5c0-97e0-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:18:34.589643Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqk7sp kind= uid=8262a5c0-97e0-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:22:34.936054Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqk7sp\": StorageError: invalid object, 
Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqk7sp, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 8262a5c0-97e0-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqk7sp" level=info timestamp=2018-08-04T12:22:35.065816Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9kktr kind= uid=11b8ee70-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:22:35.065946Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9kktr kind= uid=11b8ee70-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:22:35.120152Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kktr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kktr" level=info timestamp=2018-08-04T12:22:35.134174Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kktr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kktr" level=info timestamp=2018-08-04T12:25:35.344060Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kktr\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi9kktr, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 11b8ee70-97e1-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kktr" level=info timestamp=2018-08-04T12:25:35.528791Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmin6mmz kind= uid=7d491958-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:25:35.528943Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmin6mmz kind= uid=7d491958-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:25:36.581965Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmin6mmz\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmin6mmz, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 7d491958-97e1-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmin6mmz" level=info timestamp=2018-08-04T12:25:36.778296Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmimb4w7 kind= uid=7e07880b-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:25:36.778394Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmimb4w7 kind= 
uid=7e07880b-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 59 network_throughput 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[60:{} 63:{} 232:{} 61:{} 183:{} 228:{} 231:{} 236:{} 59:{} 184:{} 227:{} 62:{} 144:{} 235:{}] Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[60:{} 62:{} 144:{} 235:{} 231:{} 61:{} 227:{} 183:{} 59:{} 63:{} 232:{} 236:{} 184:{} 228:{}] level=error timestamp=2018-08-04T12:28:18.327292Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmimb4w7-hk6xh Pod phase: Pending ------------------------------ • Failure [180.448 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:164 should consume hugepages /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 hugepages-2Mi [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Timed out after 90.004s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1047 ------------------------------ STEP: Starting a VM level=info timestamp=2018-08-04T12:25:37.497585Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmimb4w7 kind=VirtualMachineInstance uid=7e07880b-97e1-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmimb4w7-hk6xh" S [SKIPPING] [0.213 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:164 should consume hugepages /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 hugepages-1Gi [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 No node with hugepages hugepages-1Gi capacity /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:216 ------------------------------ • Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T12:31:11.816887Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:31:12.934534Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:31:16.096553Z pos=filter.go:46 
component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:31:16.112054Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:31:16.129594Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:31:20.600143Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:31:21 http: TLS handshake error from 10.128.0.1:41918: EOF level=info timestamp=2018-08-04T12:31:22.977936Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:31:25.507308Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:31:27.561971Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:31:27.573790Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:31:27.584549Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:31:31 http: TLS handshake error from 10.128.0.1:41976: EOF level=info timestamp=2018-08-04T12:31:33.017154Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:31:33.092889Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:29:57 http: TLS handshake error from 10.129.0.1:52598: EOF 2018/08/04 12:30:07 http: TLS handshake error from 10.129.0.1:52608: EOF 2018/08/04 12:30:17 http: TLS handshake error from 10.129.0.1:52618: EOF level=info timestamp=2018-08-04T12:30:25.510669Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:30:27 http: TLS handshake error from 10.129.0.1:52628: EOF 2018/08/04 12:30:37 http: TLS handshake error from 10.129.0.1:52638: EOF 2018/08/04 12:30:47 http: TLS handshake error from 10.129.0.1:52648: EOF level=info timestamp=2018-08-04T12:30:55.560857Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:30:57 http: TLS handshake error 
from 10.129.0.1:52658: EOF 2018/08/04 12:31:07 http: TLS handshake error from 10.129.0.1:52668: EOF 2018/08/04 12:31:17 http: TLS handshake error from 10.129.0.1:52678: EOF level=info timestamp=2018-08-04T12:31:25.514290Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:31:25.892559Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:31:27 http: TLS handshake error from 10.129.0.1:52690: EOF 2018/08/04 12:31:37 http: TLS handshake error from 10.129.0.1:52700: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T12:22:35.120152Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kktr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kktr" level=info timestamp=2018-08-04T12:22:35.134174Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kktr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kktr" level=info timestamp=2018-08-04T12:25:35.344060Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kktr\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi9kktr, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 11b8ee70-97e1-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kktr" level=info timestamp=2018-08-04T12:25:35.528791Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmin6mmz kind= uid=7d491958-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:25:35.528943Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmin6mmz kind= uid=7d491958-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:25:36.581965Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmin6mmz\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmin6mmz, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 7d491958-97e1-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmin6mmz" level=info timestamp=2018-08-04T12:25:36.778296Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmimb4w7 kind= uid=7e07880b-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info 
timestamp=2018-08-04T12:25:36.778394Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmimb4w7 kind= uid=7e07880b-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:28:37.271859Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqklcr kind= uid=e99cd856-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:28:37.272030Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqklcr kind= uid=e99cd856-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:28:37.319698Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqklcr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqklcr" level=info timestamp=2018-08-04T12:28:37.335081Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqklcr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqklcr" level=info timestamp=2018-08-04T12:28:38.335086Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqklcr\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqklcr, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: e99cd856-97e1-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqklcr" level=info timestamp=2018-08-04T12:28:38.509092Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibjvss kind= uid=ea59c24e-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:28:38.509251Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibjvss kind= uid=ea59c24e-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[59:{} 61:{} 62:{} 183:{} 60:{} 227:{} 231:{} 184:{} 228:{} 235:{} 63:{} 232:{} 236:{} 144:{}] level=error timestamp=2018-08-04T12:30:26.411176Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[228:{} 63:{} 236:{} 59:{} 184:{} 227:{} 232:{} 60:{} 183:{} 144:{} 235:{} 231:{} 61:{} 62:{}] level=error timestamp=2018-08-04T12:30:29.331005Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" 
msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmibjvss-r7v5w Pod phase: Pending ------------------------------ • Failure in Spec Setup (BeforeEach) [180.455 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 with CPU spec /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:294 when CPU model defined [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:340 should report defined CPU model /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:341 Timed out after 90.005s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1047 ------------------------------ level=info timestamp=2018-08-04T12:28:39.233325Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmibjvss kind=VirtualMachineInstance uid=ea59c24e-97e1-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmibjvss-r7v5w" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running level=info timestamp=2018-08-04T12:34:16.707034Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:34:16.726204Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:34:16.737028Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:34:21.009520Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:34:21 http: TLS handshake error from 10.128.0.1:42786: EOF level=info timestamp=2018-08-04T12:34:23.849216Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:34:26.108350Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:34:28.089078Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:34:28.102404Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:34:28.113801Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:34:31 http: TLS handshake error from 10.128.0.1:42836: EOF level=info 
timestamp=2018-08-04T12:34:33.475841Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:34:33.892286Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:34:39.151912Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-08-04T12:34:39.153699Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json proto=HTTP/2.0 statusCode=404 contentLength=19 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:32:17 http: TLS handshake error from 10.129.0.1:52740: EOF 2018/08/04 12:32:27 http: TLS handshake error from 10.129.0.1:52750: EOF 2018/08/04 12:32:37 http: TLS handshake error from 10.129.0.1:52760: EOF 2018/08/04 12:32:47 http: TLS handshake error from 10.129.0.1:52770: EOF 2018/08/04 12:32:57 http: TLS handshake error from 10.129.0.1:52780: EOF 2018/08/04 12:33:07 http: TLS handshake error from 10.129.0.1:52790: EOF 2018/08/04 12:33:17 http: TLS handshake error from 10.129.0.1:52800: EOF 2018/08/04 12:33:27 http: TLS handshake error from 10.129.0.1:52810: EOF 2018/08/04 12:33:37 http: TLS handshake error from 10.129.0.1:52820: EOF 2018/08/04 12:33:47 http: TLS handshake error from 10.129.0.1:52830: EOF 2018/08/04 12:33:57 http: TLS handshake error from 10.129.0.1:52840: EOF 2018/08/04 12:34:07 http: TLS handshake error from 10.129.0.1:52850: EOF 2018/08/04 12:34:17 http: TLS handshake error from 10.129.0.1:52860: EOF 2018/08/04 12:34:27 http: TLS handshake error from 10.129.0.1:52872: EOF 2018/08/04 12:34:37 http: TLS handshake error from 10.129.0.1:52882: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T12:25:36.581965Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmin6mmz\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmin6mmz, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 7d491958-97e1-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmin6mmz" level=info timestamp=2018-08-04T12:25:36.778296Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmimb4w7 kind= uid=7e07880b-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:25:36.778394Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmimb4w7 kind= uid=7e07880b-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:28:37.271859Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqklcr kind= uid=e99cd856-97e1-11e8-96b4-525500d15501 msg="Initializing 
VirtualMachineInstance" level=info timestamp=2018-08-04T12:28:37.272030Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqklcr kind= uid=e99cd856-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:28:37.319698Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqklcr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqklcr" level=info timestamp=2018-08-04T12:28:37.335081Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqklcr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqklcr" level=info timestamp=2018-08-04T12:28:38.335086Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqklcr\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqklcr, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: e99cd856-97e1-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqklcr" level=info timestamp=2018-08-04T12:28:38.509092Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibjvss kind= uid=ea59c24e-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:28:38.509251Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibjvss kind= uid=ea59c24e-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:31:38.963648Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiktrgk kind= uid=55e8c005-97e2-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:31:38.963822Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiktrgk kind= uid=55e8c005-97e2-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:31:39.039689Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiktrgk\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiktrgk" level=info timestamp=2018-08-04T12:31:39.052895Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiktrgk\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiktrgk" level=info timestamp=2018-08-04T12:31:39.067169Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiktrgk\": the object has been modified; please apply your changes to the 
latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiktrgk" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[63:{} 59:{} 61:{} 184:{} 227:{} 228:{} 236:{} 235:{} 231:{} 232:{} 62:{} 60:{} 183:{} 144:{}] level=error timestamp=2018-08-04T12:33:41.555077Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[63:{} 59:{} 60:{} 184:{} 144:{} 228:{} 232:{} 236:{} 61:{} 62:{} 183:{} 231:{} 227:{} 235:{}] level=error timestamp=2018-08-04T12:33:44.468955Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmiktrgk-q7bg9 Pod phase: Pending • Failure in Spec Setup (BeforeEach) [180.448 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 with CPU spec /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:294 when CPU model equals to passthrough [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:368 should report exactly the same model as node CPU /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:369 Timed out after 90.004s. Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1047 ------------------------------ level=info timestamp=2018-08-04T12:31:39.653467Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmiktrgk kind=VirtualMachineInstance uid=55e8c005-97e2-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmiktrgk-q7bg9" Pod name: disks-images-provider-mgvbf Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pknmj Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-ksjrq Pod phase: Running 2018/08/04 12:37:11 http: TLS handshake error from 10.128.0.1:43592: EOF level=info timestamp=2018-08-04T12:37:13.174403Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:37:14.848058Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:37:17.206941Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:37:17.230827Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:37:17.242048Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- 
method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:37:21.348026Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:37:21 http: TLS handshake error from 10.128.0.1:43640: EOF level=info timestamp=2018-08-04T12:37:24.895932Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:37:28.703742Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:37:28.722128Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:37:28.733049Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/04 12:37:31 http: TLS handshake error from 10.128.0.1:43690: EOF level=info timestamp=2018-08-04T12:37:33.882073Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-04T12:37:34.933662Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-lbcmq Pod phase: Running 2018/08/04 12:35:37 http: TLS handshake error from 10.129.0.1:52942: EOF 2018/08/04 12:35:47 http: TLS handshake error from 10.129.0.1:52952: EOF 2018/08/04 12:35:57 http: TLS handshake error from 10.129.0.1:52962: EOF 2018/08/04 12:36:07 http: TLS handshake error from 10.129.0.1:52972: EOF 2018/08/04 12:36:17 http: TLS handshake error from 10.129.0.1:52982: EOF level=info timestamp=2018-08-04T12:36:25.584549Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:36:27 http: TLS handshake error from 10.129.0.1:52992: EOF 2018/08/04 12:36:37 http: TLS handshake error from 10.129.0.1:53002: EOF 2018/08/04 12:36:47 http: TLS handshake error from 10.129.0.1:53012: EOF 2018/08/04 12:36:57 http: TLS handshake error from 10.129.0.1:53022: EOF 2018/08/04 12:37:07 http: TLS handshake error from 10.129.0.1:53032: EOF 2018/08/04 12:37:17 http: TLS handshake error from 10.129.0.1:53042: EOF level=info timestamp=2018-08-04T12:37:25.561692Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/04 12:37:27 http: TLS handshake error from 10.129.0.1:53052: EOF 2018/08/04 12:37:37 http: TLS handshake error from 10.129.0.1:53064: EOF Pod name: virt-controller-7d57d96b65-csf74 Pod phase: Running level=info timestamp=2018-08-04T11:08:01.366211Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: 
virt-controller-7d57d96b65-k6hk6 Pod phase: Running level=info timestamp=2018-08-04T12:28:37.271859Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqklcr kind= uid=e99cd856-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:28:37.272030Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqklcr kind= uid=e99cd856-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:28:37.319698Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqklcr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqklcr" level=info timestamp=2018-08-04T12:28:37.335081Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqklcr\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqklcr" level=info timestamp=2018-08-04T12:28:38.335086Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqklcr\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiqklcr, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: e99cd856-97e1-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqklcr" level=info timestamp=2018-08-04T12:28:38.509092Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibjvss kind= uid=ea59c24e-97e1-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:28:38.509251Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibjvss kind= uid=ea59c24e-97e1-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:31:38.963648Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiktrgk kind= uid=55e8c005-97e2-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:31:38.963822Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiktrgk kind= uid=55e8c005-97e2-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-04T12:31:39.039689Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiktrgk\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiktrgk" level=info timestamp=2018-08-04T12:31:39.052895Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiktrgk\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance 
kubevirt-test-default/testvmiktrgk" level=info timestamp=2018-08-04T12:31:39.067169Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiktrgk\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiktrgk" level=info timestamp=2018-08-04T12:34:39.242319Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiktrgk\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmiktrgk, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 55e8c005-97e2-11e8-96b4-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiktrgk" level=info timestamp=2018-08-04T12:34:39.409125Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmis22pj kind= uid=c1767db2-97e2-11e8-96b4-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-04T12:34:39.409360Z pos=preset.go:171 component=virt-controller service=http namespace=kubevirt-test-default name=testvmis22pj kind= uid=c1767db2-97e2-11e8-96b4-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-n99ls Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter level=error timestamp=2018-08-04T12:37:14.639781Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Printing discovered devices map[59:{} 228:{} 62:{} 183:{} 235:{} 63:{} 227:{} 144:{} 232:{} 236:{} 60:{} 61:{} 184:{} 231:{}] Pod name: virt-handler-tt462 Pod phase: Running 60 network_latency 61 cpu_dma_latency 184 microcode 227 mcelog 62 crash 183 hw_random 144 nvram 228 hpet 235 autofs 231 snapshot 63 vga_arbiter Printing discovered devices map[61:{} 184:{} 228:{} 231:{} 236:{} 227:{} 144:{} 235:{} 63:{} 60:{} 62:{} 183:{} 232:{} 59:{}] level=error timestamp=2018-08-04T12:37:17.535505Z pos=health.go:55 component=virt-handler reason="tun device does not show up in /proc/misc, is the module loaded?" msg="Check for mandatory device /dev/net/tun failed" Pod name: virt-launcher-testvmis22pj-z7cxw Pod phase: Pending • Failure in Spec Setup (BeforeEach) [180.441 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 with CPU spec /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:294 when CPU model not defined [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:392 should report CPU model from libvirt capabilities /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:393 Timed out after 90.004s. 
Timed out waiting for VMI to enter Running phase Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1047 ------------------------------ level=info timestamp=2018-08-04T12:34:40.129681Z pos=utils.go:244 component=tests namespace=kubevirt-test-default name=testvmis22pj kind=VirtualMachineInstance uid=c1767db2-97e2-11e8-96b4-525500d15501 msg="Created virtual machine pod virt-launcher-testvmis22pj-z7cxw" panic: test timed out after 1h30m0s goroutine 10508 [running]: testing.(*M).startAlarm.func1() /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1240 +0xfc created by time.goFunc /gimme/.gimme/versions/go1.10.linux.amd64/src/time/sleep.go:172 +0x44 goroutine 1 [chan receive, 90 minutes]: testing.(*T).Run(0xc4205f51d0, 0x12ad0d7, 0x9, 0x1339680, 0x47fa16) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:825 +0x301 testing.runTests.func1(0xc4205f50e0) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1063 +0x64 testing.tRunner(0xc4205f50e0, 0xc42055bdf8) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0 testing.runTests(0xc4208f4d20, 0x1bc0e20, 0x1, 0x1, 0x412009) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1061 +0x2c4 testing.(*M).Run(0xc42064fa00, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:978 +0x171 main.main() _testmain.go:44 +0x151 goroutine 20 [chan receive]: kubevirt.io/kubevirt/vendor/github.com/golang/glog.(*loggingT).flushDaemon(0x1be7be0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:879 +0x8b created by kubevirt.io/kubevirt/vendor/github.com/golang/glog.init.0 /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:410 +0x203 goroutine 22 [syscall, 90 minutes]: os/signal.signal_recv(0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/sigqueue.go:139 +0xa6 os/signal.loop() /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:22 +0x22 created by os/signal.init.0 /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:28 +0x41 goroutine 24 [sleep]: time.Sleep(0xb18400f) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/time.go:102 +0x166 kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol.realClock.Sleep(0xb18400f) /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol/throttle.go:66 +0x2b kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol.(*tokenBucketRateLimiter).Accept(0xc4208b9140) /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol/throttle.go:91 +0xbd kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest.(*Request).tryThrottle(0xc420c5ea80) /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest/request.go:478 +0x1fd kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest.(*Request).Do(0xc420c5ea80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest/request.go:733 +0x62 kubevirt.io/kubevirt/pkg/kubecli.(*vmis).Get(0xc420bd9ec0, 0xc4207a74e4, 0xc, 0xc42062c840, 0xc420bd9ec0, 0xc420080800, 0x8) /root/go/src/kubevirt.io/kubevirt/pkg/kubecli/vmi.go:369 +0x125 kubevirt.io/kubevirt/tests.waitForVMIStart.func1(0x0) /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1014 +0xc2 reflect.Value.call(0x10c8b60, 0xc4206f19e0, 0x13, 0x12a74c2, 0x4, 0xc420b42cf0, 0x0, 0x0, 0x10c8b60, 0x10c8b60, ...) 
/gimme/.gimme/versions/go1.10.linux.amd64/src/reflect/value.go:447 +0x969 reflect.Value.Call(0x10c8b60, 0xc4206f19e0, 0x13, 0xc420b42cf0, 0x0, 0x0, 0x44b21b, 0xc42050c738, 0xc420b42d28) /gimme/.gimme/versions/go1.10.linux.amd64/src/reflect/value.go:308 +0xa4 kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).pollActual(0xc420829800, 0x0, 0x0, 0x0, 0x0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:71 +0x9f kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).match(0xc420829800, 0x13c29a0, 0xc4203f3a80, 0x412801, 0xc4203f3a90, 0x1, 0x1, 0xc4203f3a90) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:141 +0x305 kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).Should(0xc420829800, 0x13c29a0, 0xc4203f3a80, 0xc4203f3a90, 0x1, 0x1, 0x13ba800) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:48 +0x62 kubevirt.io/kubevirt/tests.waitForVMIStart(0x13b9580, 0xc42012ca00, 0x5a, 0x0, 0x0, 0x1c06201) /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1024 +0x703 kubevirt.io/kubevirt/tests.WaitForSuccessfulVMIStart(0x13b9580, 0xc42012ca00, 0x1c06250, 0x0) /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1047 +0x43 kubevirt.io/kubevirt/tests_test.glob..func14.4.2() /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:439 +0x197 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).runSync(0xc420708480, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:113 +0x9c kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).run(0xc420708480, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:64 +0x13e kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*ItNode).Run(0xc4202cec60, 0x13b5fc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) 
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go:26 +0x7f kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).runSample(0xc4208efa40, 0x0, 0x13b5fc0, 0xc4200e3480) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:203 +0x648 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).Run(0xc4208efa40, 0x13b5fc0, 0xc4200e3480) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:138 +0xff kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpec(0xc420350500, 0xc4208efa40, 0x0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:200 +0x10d kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpecs(0xc420350500, 0x1) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:170 +0x329 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run(0xc420350500, 0xb) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:66 +0x11b kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite.(*Suite).Run(0xc4200e0b40, 0x7f6e736f1178, 0xc4205f51d0, 0x12af51b, 0xb, 0xc4208f4d60, 0x2, 0x2, 0x13d0ca0, 0xc4200e3480, ...) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite/suite.go:62 +0x27c kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithCustomReporters(0x13b6d00, 0xc4205f51d0, 0x12af51b, 0xb, 0xc4208f4d40, 0x2, 0x2, 0x2) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:221 +0x258 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithDefaultAndCustomReporters(0x13b6d00, 0xc4205f51d0, 0x12af51b, 0xb, 0xc4205a7010, 0x1, 0x1, 0x1) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:209 +0xab kubevirt.io/kubevirt/tests_test.TestTests(0xc4205f51d0) /root/go/src/kubevirt.io/kubevirt/tests/tests_suite_test.go:43 +0xaa testing.tRunner(0xc4205f51d0, 0x1339680) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0 created by testing.(*T).Run /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:824 +0x2e0 goroutine 25 [chan receive, 90 minutes]: kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).registerForInterrupts(0xc420350500, 0xc4200c7320) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:223 +0xd1 created by kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:60 +0x88 goroutine 26 [select, 90 minutes, locked to thread]: runtime.gopark(0x133b4c0, 0x0, 0x12a9dde, 0x6, 0x18, 0x1) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/proc.go:291 +0x11a runtime.selectgo(0xc420472f50, 0xc4200c73e0) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/select.go:392 +0xe50 runtime.ensureSigM.func1() /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/signal_unix.go:549 +0x1f4 runtime.goexit() /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/asm_amd64.s:2361 +0x1 goroutine 43 [IO wait]: internal/poll.runtime_pollWait(0x7f6e736cdf00, 0x72, 0xc420837850) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/netpoll.go:173 +0x57 internal/poll.(*pollDesc).wait(0xc420630898, 0x72, 
0xffffffffffffff00, 0x13b7ca0, 0x1ad8640) /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:85 +0x9b internal/poll.(*pollDesc).waitRead(0xc420630898, 0xc42070e000, 0x8000, 0x8000) /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:90 +0x3d internal/poll.(*FD).Read(0xc420630880, 0xc42070e000, 0x8000, 0x8000, 0x0, 0x0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_unix.go:157 +0x17d net.(*netFD).Read(0xc420630880, 0xc42070e000, 0x8000, 0x8000, 0x0, 0x8, 0x7ffb) /gimme/.gimme/versions/go1.10.linux.amd64/src/net/fd_unix.go:202 +0x4f net.(*conn).Read(0xc420838000, 0xc42070e000, 0x8000, 0x8000, 0x0, 0x0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/net/net.go:176 +0x6a crypto/tls.(*block).readFromUntil(0xc42063e1e0, 0x7f6e7362d158, 0xc420838000, 0x5, 0xc420838000, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:493 +0x96 crypto/tls.(*Conn).readRecord(0xc420024000, 0x133b617, 0xc420024120, 0x20) /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:595 +0xe0 crypto/tls.(*Conn).Read(0xc420024000, 0xc420517000, 0x1000, 0x1000, 0x0, 0x0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:1156 +0x100 bufio.(*Reader).Read(0xc4208ec300, 0xc4205302d8, 0x9, 0x9, 0xc420ce3378, 0xc4208e6140, 0xc420837d10) /gimme/.gimme/versions/go1.10.linux.amd64/src/bufio/bufio.go:216 +0x238 io.ReadAtLeast(0x13b4e00, 0xc4208ec300, 0xc4205302d8, 0x9, 0x9, 0x9, 0xc420837ce0, 0xc420837ce0, 0x406614) /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:309 +0x86 io.ReadFull(0x13b4e00, 0xc4208ec300, 0xc4205302d8, 0x9, 0x9, 0xc420ce3320, 0xc420837d10, 0xc400005001) /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:327 +0x58 kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.readFrameHeader(0xc4205302d8, 0x9, 0x9, 0x13b4e00, 0xc4208ec300, 0x0, 0xc400000000, 0x7baa0d, 0xc420837fb0) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:237 +0x7b kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Framer).ReadFrame(0xc4205302a0, 0xc420746e70, 0x0, 0x0, 0x0) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:492 +0xa4 kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*clientConnReadLoop).run(0xc420837fb0, 0x133a5d8, 0xc4206377b0) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1428 +0x8e kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*ClientConn).readLoop(0xc420706000) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1354 +0x76 created by kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Transport).newClientConn /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:579 +0x651 make: *** [functest] Error 2 + make cluster-down ./cluster/down.sh
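Note on the timeout pattern above: the goroutine dump shows the suite blocked in kubevirt.io/kubevirt/tests.waitForVMIStart (tests/utils.go, called from WaitForSuccessfulVMIStart at utils.go:1047), polling the VMI through the client until it reports the Running phase. Each failed spec hit the 90 s per-spec limit, and the whole run was killed by Go's 1h30m test timeout. The sketch below is a minimal, simplified illustration of that poll-until-phase-or-timeout loop; the function and variable names (waitForPhase, getPhase) are hypothetical and not the actual KubeVirt helpers, which go through the generated VMI client instead of a plain callback.

package main

import (
	"fmt"
	"time"
)

// waitForPhase polls getPhase until it reports the wanted phase or the
// timeout expires. Hypothetical, simplified stand-in for the wait loop the
// failing specs were stuck in (tests/utils.go waitForVMIStart).
func waitForPhase(getPhase func() (string, error), want string, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		phase, err := getPhase()
		if err == nil && phase == want {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("timed out after %s waiting for phase %q", timeout, want)
}

func main() {
	// Toy stand-in for fetching the VMI status from the cluster: the phase
	// never leaves Pending, so the wait times out, mirroring the failures above.
	getPhase := func() (string, error) { return "Pending", nil }
	if err := waitForPhase(getPhase, "Running", 3*time.Second, 500*time.Millisecond); err != nil {
		fmt.Println(err)
	}
}

In the real run the polled VMIs never left Pending within the window (the virt-launcher pods themselves stayed in the Pending phase), so every spec that waits this way reported the same "Timed out waiting for VMI to enter Running phase" failure.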