+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ [[ openshift-3.10-release =~ openshift-.* ]]
+ [[ openshift-3.10-release =~ .*-crio-.* ]]
+ export KUBEVIRT_PROVIDER=os-3.10.0
+ KUBEVIRT_PROVIDER=os-3.10.0
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT SIGINT SIGTERM SIGSTOP
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading .......
Downloading .......
Downloading .......
2018/07/16 10:59:35 Waiting for host: 192.168.66.102:22
2018/07/16 10:59:38 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/16 10:59:46 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/16 10:59:54 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/16 10:59:59 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: connection refused. Sleeping 5s
2018/07/16 11:00:04 Connected to tcp://192.168.66.102:22
+ systemctl stop origin-node.service
+ rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/
++ docker ps -q
+ containers=
+ '[' -n '' ']'
++ docker ps -q -a
+ containers='380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8'
+ '[' -n '380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8' ']'
+ docker rm -f 380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8
380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8
2018/07/16 11:00:06 Waiting for host: 192.168.66.101:22
2018/07/16 11:00:09 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/16 11:00:17 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/16 11:00:25 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
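For reference, the per-node reset traced above boils down to the following sketch. It is reconstructed from the xtrace only, not taken from the provider's actual provisioning script; the `docker stop` branch (not exercised in this run, since no containers were running) and the `set -xe` header are assumptions.

set -xe

# Wipe the previous OpenShift node state.
systemctl stop origin-node.service
rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/

# Stop any still-running containers, then force-remove every container
# so the node comes back clean for the next cluster-up.
containers=$(docker ps -q)
if [ -n "$containers" ]; then
    docker stop $containers    # assumed body; the xtrace above only shows the test
fi
containers=$(docker ps -q -a)
if [ -n "$containers" ]; then
    docker rm -f $containers
fi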
2018/07/16 11:00:33 Connected to tcp://192.168.66.101:22
+ inventory_file=/root/inventory
+ openshift_ansible=/root/openshift-ansible
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=2.93 ms

--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 2.933/2.933/2.933/0.000 ms
Found node02. Adding it to the inventory.
+ '[' 0 -ne 0 ']'
+ nodes_found=true
+ set -e
+ echo '192.168.66.102 node02'
+ echo 'Found node02. Adding it to the inventory.'
+ echo 'node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102'
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable

--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms

+ '[' 1 -ne 0 ']'
+ break
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml

PLAY [Populate config host groups] *********************************************

TASK [Load group name mapping variables] ***************************************
ok: [localhost]

TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]

TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]

TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]

TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]

TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]

TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]

TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]

TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)

TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]

TASK [Evaluate oo_new_etcd_to_config] ******************************************

TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]

TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)

TASK [Evaluate oo_nodes_to_bootstrap]
****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) PLAY [Ensure there are new_nodes] ********************************************** TASK [fail] ******************************************************************** skipping: [localhost] TASK [fail] ******************************************************************** skipping: [localhost] PLAY [Initialization Checkpoint Start] ***************************************** TASK [Set install initialization 'In Progress'] ******************************** ok: [node01] PLAY [Populate config host groups] ********************************************* TASK [Load group name mapping variables] *************************************** ok: [localhost] TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] ********* skipping: [localhost] TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_lb_hosts required] *********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts required] ********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts is single host] **************************** skipping: [localhost] TASK [Evaluate groups - g_glusterfs_hosts required] **************************** skipping: [localhost] TASK [Evaluate oo_all_hosts] *************************************************** ok: [localhost] => (item=node01) ok: [localhost] => (item=node02) TASK [Evaluate oo_masters] ***************************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_master] ************************************************ ok: [localhost] TASK [Evaluate oo_new_etcd_to_config] ****************************************** TASK [Evaluate oo_masters_to_config] ******************************************* ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_to_config] ********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_etcd] ************************************************** ok: [localhost] TASK [Evaluate oo_etcd_hosts_to_upgrade] *************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_hosts_to_backup] **************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_nodes_to_config] ********************************************* ok: [localhost] => (item=node02) TASK [Evaluate oo_nodes_to_bootstrap] ****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** 
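The host groups being evaluated in this play come out of the inventory assembled by the shell loop traced above, just before `ansible-playbook` started. A minimal sketch of that loop, reconstructed from the xtrace; the redirection targets for the `echo` lines (the inventory file and `/etc/hosts`) are assumptions, as the trace does not show where the output goes.

inventory_file=/root/inventory
openshift_ansible=/root/openshift-ansible

# Open a [new_nodes] group and register it under [OSEv3:children].
echo '[new_nodes]' >> "$inventory_file"            # assumed redirection target
sed -i '/\[OSEv3:children\]/a new_nodes' "$inventory_file"

# Probe candidate nodes (node02, node03, ...) by IP; the first
# unreachable address ends discovery.
nodes_found=false
for i in $(seq 2 100); do
    node=$(printf node%02d "$i")
    node_ip=$(printf 192.168.66.1%02d "$i")
    set +e
    ping "$node_ip" -c 1
    if [ $? -ne 0 ]; then
        break
    fi
    nodes_found=true
    set -e
    echo "$node_ip $node" >> /etc/hosts             # assumed destination
    echo "Found $node. Adding it to the inventory."
    echo "$node openshift_node_group_name=\"node-config-compute\" openshift_schedulable=true openshift_ip=$node_ip" >> "$inventory_file"
done

# Scale the cluster up only if at least one new node answered.
if [ "$nodes_found" = true ]; then
    ansible-playbook -i "$inventory_file" "$openshift_ansible/playbooks/openshift-node/scaleup.yml"
fi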
TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) [WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config PLAY [Ensure that all non-node hosts are accessible] *************************** TASK [Gathering Facts] ********************************************************* ok: [node01] PLAY [Initialize basic host facts] ********************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02 TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] *** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : debug] ************************************ skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_stats] ******************************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] *** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02 included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02 TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Standardize on latest variable names] ***** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Normalize openshift_release] ************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02 TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* skipping: [node01] => (item=openshift_hosted_etcd_storage_kind) skipping: [node02] => (item=openshift_hosted_etcd_storage_kind) TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK 
[openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : ensure provider configuration variables are defined] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure removed web console extension variables are not set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that web console port matches API server port] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : At least one master is schedulable] ******* skipping: [node01] skipping: [node02] TASK [Detecting Operating System from ostree_booted] *************************** ok: [node02] ok: [node01] TASK [set openshift_deployment_type if unset] ********************************** skipping: [node01] skipping: [node02] TASK [check for node already bootstrapped] ************************************* ok: [node01] ok: [node02] TASK [initialize_facts set fact openshift_is_bootstrapped] ********************* ok: [node01] ok: [node02] TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] *** ok: [node01] ok: [node02] TASK [Determine Atomic Host Docker Version] ************************************ skipping: [node01] skipping: [node02] TASK [assert atomic host docker version is 1.12 or later] ********************** skipping: [node01] skipping: [node02] PLAY [Retrieve existing master configs and validate] *************************** TASK [openshift_control_plane : stat] ****************************************** ok: [node01] TASK [openshift_control_plane : slurp] ***************************************** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] *** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Initialize special first-master variables] ******************************* TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Disable web console if required] ***************************************** TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Setup yum repositories for all hosts] ************************************ TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Install Red Hat Subscription manager] ******************* skipping: [node02] TASK [rhel_subscribe : Is host already registered?] 
**************************** skipping: [node02] TASK [rhel_subscribe : Register host] ****************************************** skipping: [node02] TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] *********** skipping: [node02] TASK [rhel_subscribe : Attach to OpenShift Pool] ******************************* skipping: [node02] TASK [rhel_subscribe : Satellite preparation] ********************************** skipping: [node02] TASK [openshift_repos : openshift_repos detect ostree] ************************* ok: [node02] TASK [openshift_repos : Ensure libselinux-python is installed] ***************** ok: [node02] TASK [openshift_repos : Remove openshift_additional.repo file] ***************** ok: [node02] TASK [openshift_repos : Create any additional repos that are defined] ********** TASK [openshift_repos : include_tasks] ***************************************** skipping: [node02] TASK [openshift_repos : include_tasks] ***************************************** included: /root/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02 TASK [openshift_repos : Configure origin gpg keys] ***************************** ok: [node02] TASK [openshift_repos : Configure correct origin release repository] *********** ok: [node02] => (item=/root/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2) TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] *** changed: [node02] => { "msg": "First run of openshift_repos" } TASK [openshift_repos : Record that openshift_repos already ran] *************** ok: [node02] RUNNING HANDLER [openshift_repos : refresh cache] ****************************** changed: [node02] PLAY [Install packages necessary for installer] ******************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Determine if chrony is installed] **************************************** [WARNING]: Consider using the yum, dnf or zypper module rather than running rpm. If you need to use command because yum, dnf or zypper is insufficient you can add warn=False to this command task or set command_warnings=False in ansible.cfg to get rid of this message. 
changed: [node02] TASK [Install ntp package] ***************************************************** skipping: [node02] TASK [Start and enable ntpd/chronyd] ******************************************* changed: [node02] TASK [Ensure openshift-ansible installer package deps are installed] *********** ok: [node02] => (item=iproute) ok: [node02] => (item=dbus-python) ok: [node02] => (item=PyYAML) ok: [node02] => (item=python-ipaddress) ok: [node02] => (item=libsemanage-python) ok: [node02] => (item=yum-utils) ok: [node02] => (item=python-docker) PLAY [Initialize cluster facts] ************************************************ TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [get openshift_current_version] ******************************************* ok: [node02] ok: [node01] TASK [set_fact openshift_portal_net if present on masters] ********************* ok: [node01] ok: [node02] TASK [Gather Cluster facts] **************************************************** changed: [node02] changed: [node01] TASK [Set fact of no_proxy_internal_hostnames] ********************************* skipping: [node01] skipping: [node02] TASK [Initialize openshift.node.sdn_mtu] *************************************** changed: [node02] ok: [node01] PLAY [Initialize etcd host variables] ****************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role] ************************************************************ TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** ok: [node01] TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] => { "msg": "openshift_pkg_version was not defined. 
Falling back to -3.10.0" } TASK [openshift_version : set_fact] ******************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Verify Requirements] ***************************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [Run variable sanity checks] ********************************************** ok: [node01] TASK [Validate openshift_node_groups and openshift_node_group_name] ************ ok: [node01] PLAY [Initialization Checkpoint End] ******************************************* TASK [Set install initialization 'Complete'] *********************************** ok: [node01] PLAY [Validate node hostnames] ************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Query DNS for IP address of node02] ************************************** ok: [node02] TASK [Validate openshift_hostname when defined] ******************************** skipping: [node02] TASK [Validate openshift_ip exists on node when defined] *********************** skipping: [node02] PLAY [Configure os_firewall] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [os_firewall : Detecting Atomic Host Operating System] ******************** ok: [node02] TASK [os_firewall : Set fact r_os_firewall_is_atomic] ************************** ok: [node02] TASK [os_firewall : Fail - Firewalld is not supported on Atomic Host] ********** skipping: [node02] TASK [os_firewall : Install firewalld packages] ******************************** skipping: [node02] TASK [os_firewall : Ensure iptables services are not enabled] ****************** skipping: [node02] => (item=iptables) skipping: [node02] => (item=ip6tables) TASK [os_firewall : Wait 10 seconds after disabling iptables] ****************** skipping: [node02] TASK [os_firewall : Start and enable firewalld service] ************************ skipping: [node02] TASK 
[os_firewall : need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail] *** skipping: [node02] TASK [os_firewall : Restart polkitd] ******************************************* skipping: [node02] TASK [os_firewall : Wait for polkit action to have been created] *************** skipping: [node02] TASK [os_firewall : Ensure firewalld service is not enabled] ******************* ok: [node02] TASK [os_firewall : Wait 10 seconds after disabling firewalld] ***************** skipping: [node02] TASK [os_firewall : Install iptables packages] ********************************* ok: [node02] => (item=iptables) ok: [node02] => (item=iptables-services) TASK [os_firewall : Start and enable iptables service] ************************* ok: [node02 -> node02] => (item=node02) TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] *** skipping: [node02] PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [container_runtime : Setup the docker-storage for overlay] **************** skipping: [node02] TASK [container_runtime : Create file system on extra volume device] *********** TASK [container_runtime : Create mount entry for extra volume] ***************** PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** ok: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* ok: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ****************** skipping: [node02] TASK [container_runtime : Add https_proxy to /etc/atomic.conf] ***************** skipping: [node02] TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ******************** skipping: [node02] TASK [container_runtime : Get current installed Docker version] **************** ok: [node02] TASK [container_runtime : Error out if Docker pre-installed but too old] ******* skipping: [node02] TASK [container_runtime : Error out if requested Docker is too old] ************ skipping: [node02] TASK [container_runtime : Install Docker] ************************************** skipping: [node02] TASK [container_runtime : Ensure 
docker.service.d directory exists] ************ ok: [node02] TASK [container_runtime : Configure Docker service unit file] ****************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Set registry params] ********************************* skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []}) TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] *** skipping: [node02] TASK [container_runtime : Set Proxy Settings] ********************************** skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''}) TASK [container_runtime : Set various Docker options] ************************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Configure Docker Network OPTIONS] ******************** ok: [node02] TASK [container_runtime : Detect if docker is already started] ***************** ok: [node02] TASK [container_runtime : Start the Docker service] **************************** ok: [node02] TASK [container_runtime : set_fact] ******************************************** ok: [node02] TASK [container_runtime : Check for docker_storage_path/overlay2] ************** ok: [node02] TASK [container_runtime : Fixup SELinux permissions for docker] **************** changed: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* ok: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** ok: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** ok: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] TASK [container_runtime : Fail if Atomic Host since this is an rpm request] **** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create 
systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Check that overlay is in the kernel] ***************** skipping: [node02] TASK [container_runtime : Add overlay to modprobe.d] *************************** skipping: [node02] TASK [container_runtime : Manually modprobe overlay into the kernel] *********** skipping: [node02] TASK [container_runtime : Enable and start systemd-modules-load] *************** skipping: [node02] TASK [container_runtime : Install cri-o] *************************************** skipping: [node02] TASK [container_runtime : Remove CRI-O default configuration files] ************ skipping: [node02] => (item=/etc/cni/net.d/200-loopback.conf) skipping: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf) TASK [container_runtime : Create the CRI-O configuration] ********************** skipping: [node02] TASK [container_runtime : Ensure CNI configuration directory exists] *********** skipping: [node02] TASK [container_runtime : Add iptables allow rules] **************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove iptables rules] ******************************* TASK [container_runtime : Add firewalld allow rules] *************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove firewalld allow rules] ************************ TASK [container_runtime : Configure the CNI network] *************************** skipping: [node02] TASK [container_runtime : Create /etc/sysconfig/crio-network] ****************** skipping: [node02] TASK [container_runtime : Start the CRI-O service] ***************************** skipping: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* skipping: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** skipping: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** skipping: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role] ************************************************************ TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** skipping: [node01] 
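Note that every CRI-O task in the container_runtime role above is skipped and the Docker path is taken instead; that is consistent with the provider selection at the top of the log, where the job name failed the `[[ openshift-3.10-release =~ .*-crio-.* ]]` test and `KUBEVIRT_PROVIDER=os-3.10.0` was exported. A hedged sketch of that selection follows; the CRI-O branch body and its provider name are illustrative assumptions, only the regex tests and the exported values appear in the trace.

TARGET=openshift-3.10-release
if [[ "$TARGET" =~ openshift-.* ]]; then
    if [[ "$TARGET" =~ .*-crio-.* ]]; then
        export KUBEVIRT_PROVIDER=os-3.10.0-crio   # hypothetical CRI-O provider name
    else
        export KUBEVIRT_PROVIDER=os-3.10.0        # the value exported in this run
    fi
fi
export KUBEVIRT_NUM_NODES=2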
TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Node Preparation Checkpoint Start] *************************************** TASK [Set Node preparation 'In Progress'] ************************************** ok: [node01] PLAY [Only target nodes that have not yet been bootstrapped] ******************* TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [add_host] **************************************************************** skipping: [localhost] => (item=node02) ok: [localhost] => (item=node01) PLAY [Disable excluders] ******************************************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02 TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-docker-excluder version detected] 
************ skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-docker-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-excluder version detected] ******************* skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** skipping: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] PLAY [Configure nodes] ********************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_cloud_provider : Set cloud provider facts] ********************* skipping: [node02] TASK [openshift_cloud_provider : Create cloudprovider config dir] ************** skipping: [node02] TASK [openshift_cloud_provider : include the defined cloud provider files] ***** skipping: [node02] TASK [openshift_node : fail] *************************************************** skipping: [node02] TASK [openshift_node : Check for NetworkManager service] *********************** ok: [node02] TASK [openshift_node : Set fact using_network_manager] ************************* ok: [node02] TASK [openshift_node : Install dnsmasq] **************************************** ok: [node02] TASK [openshift_node : ensure origin/node directory exists] ******************** changed: [node02] => (item=/etc/origin) 
changed: [node02] => (item=/etc/origin/node) TASK [openshift_node : Install NetworkManager during node_bootstrap provisioning] *** skipping: [node02] TASK [openshift_node : Install network manager dispatch script] **************** skipping: [node02] TASK [openshift_node : Install dnsmasq configuration] ************************** ok: [node02] TASK [openshift_node : Deploy additional dnsmasq.conf] ************************* skipping: [node02] TASK [openshift_node : Enable dnsmasq] ***************************************** ok: [node02] TASK [openshift_node : Install network manager dispatch script] **************** ok: [node02] TASK [openshift_node : Add iptables allow rules] ******************************* ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) ok: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove iptables rules] ********************************** TASK [openshift_node : Add firewalld allow rules] ****************************** skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) skipping: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove firewalld allow rules] *************************** TASK [openshift_node : Checking for journald.conf] ***************************** ok: [node02] TASK [openshift_node : Create journald persistence directories] **************** ok: [node02] TASK [openshift_node : Update journald setup] ********************************** ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'}) ok: [node02] => (item={u'var': u'Compress', u'val': True}) ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000}) ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'}) ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'}) ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'}) ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'}) ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'}) ok: [node02] => (item={u'var': u'ForwardToSyslog', 
u'val': False}) ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False}) TASK [openshift_node : Restart journald] *************************************** skipping: [node02] TASK [openshift_node : Disable swap] ******************************************* ok: [node02] TASK [openshift_node : Install node, clients, and conntrack packages] ********** ok: [node02] => (item={u'name': u'origin-node-3.10.0*'}) ok: [node02] => (item={u'name': u'origin-clients-3.10.0*'}) ok: [node02] => (item={u'name': u'conntrack-tools'}) TASK [openshift_node : Restart cri-o] ****************************************** skipping: [node02] TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] *** changed: [node02] TASK [openshift_node : sysctl] ************************************************* ok: [node02] TASK [openshift_node : Check for credentials file for registry auth] *********** skipping: [node02] TASK [openshift_node : Create credentials for registry auth] ******************* skipping: [node02] TASK [openshift_node : Create credentials for registry auth (alternative)] ***** skipping: [node02] TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] *** skipping: [node02] TASK [openshift_node : Check that node image is present] *********************** changed: [node02] TASK [openshift_node : Pre-pull node image] ************************************ skipping: [node02] TASK [openshift_node : Copy node script to the node] *************************** ok: [node02] TASK [openshift_node : Install Node service file] ****************************** ok: [node02] TASK [openshift_node : Ensure old system path is set] ************************** skipping: [node02] => (item=/etc/origin/openvswitch) skipping: [node02] => (item=/var/lib/kubelet) skipping: [node02] => (item=/opt/cni/bin) TASK [openshift_node : Check status of node image pre-pull] ******************** skipping: [node02] TASK [openshift_node : Copy node container image to ostree storage] ************ skipping: [node02] TASK [openshift_node : Install or Update node system container] **************** skipping: [node02] TASK [openshift_node : Restart network manager to ensure networking configuration is in place] *** skipping: [node02] TASK [openshift_node : Configure Node settings] ******************************** ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS='}) ok: [node02] => (item={u'regex': u'^DEBUG_LOGLEVEL=', u'line': u'DEBUG_LOGLEVEL=2'}) ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.10.0-rc.0'}) TASK [openshift_node : Configure Proxy Settings] ******************************* skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='}) skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='}) skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'}) TASK [openshift_node : file] *************************************************** skipping: [node02] TASK [openshift_node : Create the Node config] ********************************* changed: [node02] TASK [openshift_node : Configure Node Environment Variables] ******************* TASK [openshift_node : Ensure the node static pod directory exists] ************ changed: [node02] TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : Check status of node image pre-pull] 
******************** skipping: [node02] TASK [openshift_node : Install NFS storage plugin dependencies] **************** ok: [node02] TASK [openshift_node : Check for existence of nfs sebooleans] ****************** ok: [node02] => (item=virt_use_nfs) ok: [node02] => (item=virt_sandbox_use_nfs) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-16 11:08:13.519429', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.013856', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-16 11:08:13.505573', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-16 11:08:15.041254', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.012480', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-16 11:08:15.028774', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-16 11:08:13.519429', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.013856', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-16 11:08:13.505573', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-16 11:08:15.041254', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.012480', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-16 11:08:15.028774', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install GlusterFS storage plugin dependencies] ********** ok: [node02] TASK [openshift_node : Check for existence of fusefs sebooleans] *************** 
ok: [node02] => (item=virt_use_fusefs) ok: [node02] => (item=virt_sandbox_use_fusefs) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-16 11:08:22.020505', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.006713', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-16 11:08:22.013792', '_ansible_ignore_errors': None, 'failed': False}) ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-16 11:08:23.307656', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.012787', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-16 11:08:23.294869', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-16 11:08:22.020505', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.006713', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-16 11:08:22.013792', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-16 11:08:23.307656', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.012787', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-16 11:08:23.294869', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install Ceph storage plugin dependencies] *************** ok: [node02] TASK [openshift_node : Install iSCSI storage plugin dependencies] ************** ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=device-mapper-multipath) TASK [openshift_node : 
restart services] *************************************** ok: [node02] => (item=multipathd) ok: [node02] => (item=rpcbind) ok: [node02] => (item=iscsid) TASK [openshift_node : Template multipath configuration] *********************** changed: [node02] TASK [openshift_node : Enable and start multipath] ***************************** changed: [node02] TASK [tuned : Check for tuned package] ***************************************** ok: [node02] TASK [tuned : Set tuned OpenShift variables] *********************************** ok: [node02] TASK [tuned : Ensure directory structure exists] ******************************* ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': u's0', 'seuser': u'unconfined_u', 'serole': u'object_r', 'ctime': 1529614575.1009853, 'state': u'file', 'gid': 0, 'mode': u'0644', 'mtime': 1529614575.1009853, 'owner': u'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': u'admin_home_t'}) TASK [tuned 
: Ensure files are populated from templates] *********************** skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': u's0', 'seuser': u'unconfined_u', 'serole': u'object_r', 'ctime': 1529614575.1009853, 'state': u'file', 'gid': 0, 'mode': u'0644', 'mtime': 1529614575.1009853, 'owner': u'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': u'admin_home_t'}) TASK [tuned : Make tuned use the recommended tuned profile on restart] ********* changed: [node02] => (item=/etc/tuned/active_profile) changed: [node02] => (item=/etc/tuned/profile_mode) TASK [tuned : Restart tuned service] ******************************************* changed: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] ******* ok: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] *** PLAY [node bootstrap config] *************************************************** TASK [Gathering Facts] 
********************************************************* ok: [node02] TASK [openshift_node : install needed rpm(s)] ********************************** ok: [node02] => (item=origin-node) ok: [node02] => (item=origin-docker-excluder) ok: [node02] => (item=ansible) ok: [node02] => (item=bash-completion) ok: [node02] => (item=docker) ok: [node02] => (item=haproxy) ok: [node02] => (item=dnsmasq) ok: [node02] => (item=ntp) ok: [node02] => (item=logrotate) ok: [node02] => (item=httpd-tools) ok: [node02] => (item=bind-utils) ok: [node02] => (item=firewalld) ok: [node02] => (item=libselinux-python) ok: [node02] => (item=conntrack-tools) ok: [node02] => (item=openssl) ok: [node02] => (item=iproute) ok: [node02] => (item=python-dbus) ok: [node02] => (item=PyYAML) ok: [node02] => (item=yum-utils) ok: [node02] => (item=glusterfs-fuse) ok: [node02] => (item=device-mapper-multipath) ok: [node02] => (item=nfs-utils) ok: [node02] => (item=cockpit-ws) ok: [node02] => (item=cockpit-system) ok: [node02] => (item=cockpit-bridge) ok: [node02] => (item=cockpit-docker) ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=ceph-common) TASK [openshift_node : create the directory for node] ************************** skipping: [node02] TASK [openshift_node : laydown systemd override] ******************************* skipping: [node02] TASK [openshift_node : update the sysconfig to have necessary variables] ******* ok: [node02] => (item={u'regexp': u'^KUBECONFIG=.*', u'line': u'KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig'}) TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : disable origin-node service] **************************** changed: [node02] => (item=origin-node.service) TASK [openshift_node : Check for RPM generated config marker file .config_managed] *** ok: [node02] TASK [openshift_node : create directories for bootstrapping] ******************* ok: [node02] => (item=/root/openshift_bootstrap) changed: [node02] => (item=/var/lib/origin/openshift.local.config) changed: [node02] => (item=/var/lib/origin/openshift.local.config/node) ok: [node02] => (item=/etc/docker/certs.d/docker-registry.default.svc:5000) TASK [openshift_node : laydown the bootstrap.yml file for on boot configuration] *** ok: [node02] TASK [openshift_node : Create a symlink to the node client CA for the docker registry] *** ok: [node02] TASK [openshift_node : Remove RPM generated config files if present] *********** skipping: [node02] => (item=master) skipping: [node02] => (item=.config_managed) TASK [openshift_node : find all files in /etc/origin/node so we can remove them] *** skipping: [node02] TASK [openshift_node : Remove everything except the resolv.conf required for node] *** skipping: [node02] TASK [openshift_node_group : create node config template] ********************** changed: [node02] TASK [openshift_node_group : remove existing node config] ********************** changed: [node02] TASK [openshift_node_group : Ensure required directories are present] ********** ok: [node02] => (item=/etc/origin/node/pods) changed: [node02] => (item=/etc/origin/node/certificates) TASK [openshift_node_group : Update the sysconfig to group "node-config-compute"] *** changed: [node02] TASK [set_fact] **************************************************************** ok: [node02] PLAY [Re-enable excluder if it was previously enabled] ************************* TASK [openshift_excluder : Detecting 
Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] PLAY [Node Preparation Checkpoint End] ***************************************** TASK [Set Node preparation 'Complete'] ***************************************** ok: [node01] PLAY [Distribute bootstrap and start nodes] ************************************ TASK [openshift_node : Gather node information] ******************************** changed: [node02] ok: [node01] TASK [openshift_node : Copy master bootstrap config locally] ******************* ok: [node02] TASK [openshift_node : Distribute bootstrap kubeconfig if one does not exist] *** ok: [node01] changed: [node02] TASK [openshift_node : Start and enable node for bootstrapping] **************** changed: [node01] changed: [node02] TASK [openshift_node : Get node logs] ****************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : debug] ************************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : fail] *************************************************** skipping: [node02] skipping: [node01] PLAY [Approve any pending CSR requests from inventory nodes] ******************* TASK [Dump all candidate bootstrap hostnames] ********************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Find all hostnames for bootstrapping] ************************************ ok: [node01] TASK [Dump the bootstrap hostnames] ******************************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Approve bootstrap nodes] ************************************************* changed: [node01] TASK [Get CSRs] **************************************************************** skipping: [node01] TASK [Report approval errors] ************************************************** skipping: [node01] PLAY [Ensure any inventory labels are applied to the nodes] ******************** TASK [Gathering Facts] 
********************************************************* ok: [node02] ok: [node01] TASK [openshift_manage_node : Wait for master API to become available before proceeding] *** skipping: [node02] TASK [openshift_manage_node : Wait for Node Registration] ********************** ok: [node02 -> node01] ok: [node01 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02, node01 TASK [openshift_manage_node : Set node schedulability] ************************* ok: [node01 -> node01] ok: [node02 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/set_default_node_role.yml for node02, node01 TASK [openshift_manage_node : Retrieve nodes that are marked with the infra selector or the legacy infra selector] *** ok: [node02 -> node01] TASK [openshift_manage_node : Label infra or legacy infra nodes with the new role label] *** TASK [openshift_manage_node : Retrieve non-infra, non-master nodes that are not yet labeled compute] *** ok: [node02 -> node01] TASK [openshift_manage_node : label non-master non-infra nodes compute] ******** TASK [openshift_manage_node : Label all-in-one master as a compute node] ******* skipping: [node02] PLAY RECAP ********************************************************************* localhost : ok=30 changed=0 unreachable=0 failed=0 node01 : ok=71 changed=3 unreachable=0 failed=0 node02 : ok=155 changed=33 unreachable=0 failed=0 INSTALLER STATUS *************************************************************** Initialization : Complete (0:03:32) Node Preparation : Complete (0:04:44) Sending file modes: C0755 110489328 oc Sending file modes: C0600 5645 admin.kubeconfig Cluster "node01:8443" set. Cluster "node01:8443" set. + set +e + kubectl get nodes --no-headers + cluster/kubectl.sh get nodes --no-headers node01 Ready compute,infra,master 24d v1.10.0+b81c8f8 node02 Ready compute 1m v1.10.0+b81c8f8 + kubectl_rc=0 + '[' 0 -ne 0 ']' ++ kubectl get nodes --no-headers ++ cluster/kubectl.sh get nodes --no-headers ++ grep NotReady + '[' -n '' ']' + set -e + echo 'Nodes are ready:' Nodes are ready: + kubectl get nodes + cluster/kubectl.sh get nodes NAME STATUS ROLES AGE VERSION node01 Ready compute,infra,master 24d v1.10.0+b81c8f8 node02 Ready compute 1m v1.10.0+b81c8f8 + make cluster-sync ./cluster/build.sh Building ... 
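For reference, the node-readiness gate traced just above reduces to a short loop over `kubectl get nodes`. The following is a minimal sketch only, not the repository's actual cluster/kubectl.sh wrapper or CI script; the plain `kubectl` calls and the exit behaviour are assumptions drawn from the trace.

#!/usr/bin/env bash
# Sketch of the readiness gate seen in the trace above (assumed, simplified).
set -e

# Fail fast if the cluster API cannot list nodes at all.
if ! kubectl get nodes --no-headers >/dev/null; then
    echo "could not reach the cluster API" >&2
    exit 1
fi

# Any node still reporting NotReady means the scale-up has not settled yet.
if kubectl get nodes --no-headers | grep -q NotReady; then
    echo "some nodes are NotReady" >&2
    exit 1
fi

echo "Nodes are ready:"
kubectl get nodes

In the job itself this check passes immediately (node01 and node02 both report Ready), after which the build phase below starts.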
Untagged: localhost:33381/kubevirt/virt-controller:devel Untagged: localhost:33381/kubevirt/virt-controller@sha256:748bf9da445bfe033c4059bed5c8d6d991405e86522585ef1693128cf40ee537 Deleted: sha256:430b189d14179abf03cff292d143e9c91926a100e9da2060afaf145660ee6667 Deleted: sha256:1c5a117966e546eec4aa631f90f4a68740978fc4d6cc84f2dfc746e75fe65bb1 Deleted: sha256:eb9f1603a0364d0af43f2db1062c7ee64bcdfec68dd57660186cb205ac8a5d7f Deleted: sha256:9d40da0159c8e7cf7d7260898eb66633ba6740eb4bbb4f00a9aa182992c6d69e Untagged: localhost:33381/kubevirt/virt-launcher:devel Untagged: localhost:33381/kubevirt/virt-launcher@sha256:0ac56a2f594bdebc84ea946b192dc85ef7ef796964c59dc9c0c9e0f53f715222 Deleted: sha256:638c0187171aa48352c006f8fafb08c6656123d1d929bf4c865dcd1c93d27df9 Deleted: sha256:b4ef6df91b462e22501fd23711662e3de7c79cd89d1290520b3b182dcac06e4c Deleted: sha256:54454350521f3a5722fbb6353460af902f2d3180ab4dae1e5eccb8228cec4669 Deleted: sha256:500f844f4bbda3578a5836d583c5acab74f6fd1eff897127d14059a02ddfa2be Deleted: sha256:ab19e27b9e618b5864ccf1cf376cae3071003af26c4ae5ba1bf69a09f0ce310d Deleted: sha256:dcbb1f8fa5d936e738cba5cb1f314f0064a238cb47c1f20016ab889c1c57ab52 Deleted: sha256:819839b19d808e06ab4e848eb5915c2119258282d94febd18c7c8e07e44377aa Deleted: sha256:ae290e04ebdf1d93764fa2e5d74d4bedc7319a76d4161755c1dc21381e177429 Deleted: sha256:281f6f042be7a3af5270cf7a516196a1005dbfd5c2c2b45fb2e23a70069e9038 Deleted: sha256:71b80c4fd3818ef550885f2df3cbf1fe31e2a89ca3671073922b3d10dc6ca5d0 Deleted: sha256:ea9fa573f2fa78bd7952fcfa259160039e172a7fb90c63b6bdac5d4068ba2702 Deleted: sha256:1708e67d29cb5d3a69489d99eb9a6137a3282ff7aa40b849f8c633fcec89b833 Untagged: localhost:33381/kubevirt/virt-handler:devel Untagged: localhost:33381/kubevirt/virt-handler@sha256:6e75fb2b1aa586fcc9175848150447a28a463e2070e50c8ab0212a8ea5434d48 Deleted: sha256:0c19cc464a4ec3d20e0aa28b8b2222ac1483ae37bed9d4d02d443301d31eca5a Deleted: sha256:1f8d1bf173341a3a2e3fd39c11c93d4db758413fab91c0c21ad37e113df00d03 Deleted: sha256:876e4b6ef9d7dbdf4a8a40b540f94b707476133a7284a8d35aeb7193a38a8745 Deleted: sha256:8959c0d690c38b26cf6f1b39366482153226c8074b9f9c67a0d20614bb8c802e Untagged: localhost:33381/kubevirt/virt-api:devel Untagged: localhost:33381/kubevirt/virt-api@sha256:dad7d8b9c720426779f02d28929e116cc6699fbedb752c463cfe4fa30246aeee Deleted: sha256:0327fec97a661b065f1f27cd423d02b17ee7f1c311b5dcc461915b3e37fd35e5 Deleted: sha256:5c5851727768c5bd6dc068fda9a54855a4ed669e9b4aa88372d0f2a472d4cac8 Deleted: sha256:0db20e6104508adb8e49942bbf6575f6322ce383f165e2f892ed0104219c1bc9 Deleted: sha256:dd77362fede5fce32158350d40c0886c0c87226e663f2e08706eea149d094936 Untagged: localhost:33381/kubevirt/subresource-access-test:devel Untagged: localhost:33381/kubevirt/subresource-access-test@sha256:d1345cceffac7db4c63168830a68c6275595286b06b551ca6708f0353cb86cfc Deleted: sha256:f5dfcf44480c25f2e9112d5db0e416062803ff153d879a1434356b49ff264c9b Deleted: sha256:d0825e48462ef7d1da15d25908f7244d9da479f861f12fc290747e939cd5b84e Deleted: sha256:7590dec6cedeeb7c11b45e4b5fa353f0546be6ee586d4ad05d4143c0b49a4073 Deleted: sha256:0c1cd5d40af7cc6eeec088cd636fbd4047875c3354d070a5251d24f69d7c5e51 sha256:380a1cbdbe840bfd69bc431427e8cd6a0b9dd9815c87fb88c0d177c6886cc53b go version go1.10 linux/amd64 go version go1.10 linux/amd64 make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh 
sha256:380a1cbdbe840bfd69bc431427e8cd6a0b9dd9815c87fb88c0d177c6886cc53b go version go1.10 linux/amd64 go version go1.10 linux/amd64 find: '/root/go/src/kubevirt.io/kubevirt/_out/cmd': No such file or directory Compiling tests... compiled tests.test hack/build-docker.sh build Sending build context to Docker daemon 38.11 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b730b4ed65df Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> 9f795041856b Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> d0ce5fa56fcc Step 5/8 : USER 1001 ---> Using cache ---> bf13501cba5c Step 6/8 : COPY virt-controller /usr/bin/virt-controller ---> 10a8b460ef38 Removing intermediate container ceed9b500053 Step 7/8 : ENTRYPOINT /usr/bin/virt-controller ---> Running in c632bfba46af ---> e7035a5a0c43 Removing intermediate container c632bfba46af Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-controller" '' ---> Running in 8b4e8a758954 ---> 5ada3528ee4d Removing intermediate container 8b4e8a758954 Successfully built 5ada3528ee4d Sending build context to Docker daemon 40.44 MB Step 1/10 : FROM kubevirt/libvirt:3.7.0 ---> c4e262d2dc3c Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 11c19af44f99 Step 3/10 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool net-tools sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> 736e39e6f88b Step 4/10 : COPY virt-launcher /usr/bin/virt-launcher ---> 25640030108f Removing intermediate container f95767574d6e Step 5/10 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> 2738a6c1c042 Removing intermediate container 6bd106fa4919 Step 6/10 : RUN setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64 ---> Running in 73d163bf4260  ---> 903c7c96ca3c Removing intermediate container 73d163bf4260 Step 7/10 : RUN mkdir -p /usr/share/kubevirt/virt-launcher ---> Running in 5c9f61b4a9dd  ---> 7583bdc946bf Removing intermediate container 5c9f61b4a9dd Step 8/10 : COPY entrypoint.sh libvirtd.sh sock-connector /usr/share/kubevirt/virt-launcher/ ---> cc34346ad031 Removing intermediate container 4f615a8a7fa8 Step 9/10 : ENTRYPOINT /usr/share/kubevirt/virt-launcher/entrypoint.sh ---> Running in d691da5cf324 ---> faa341f794ea Removing intermediate container d691da5cf324 Step 10/10 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-launcher" '' ---> Running in 8bc0e5991312 ---> 669e2bb0c964 Removing intermediate container 8bc0e5991312 Successfully built 669e2bb0c964 Sending build context to Docker daemon 39.56 MB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b730b4ed65df Step 3/5 : COPY virt-handler /usr/bin/virt-handler ---> 8a8a78643bf8 Removing intermediate container be95ff441d6a Step 4/5 : ENTRYPOINT /usr/bin/virt-handler ---> Running in d0a2f1079fdd ---> af6c8104b200 Removing intermediate container d0a2f1079fdd Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-handler" '' ---> Running in 0ac8c0d1d214 ---> 69e4a679f348 Removing intermediate container 0ac8c0d1d214 Successfully built 69e4a679f348 Sending build context to Docker daemon 37.02 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b730b4ed65df Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api ---> 
Using cache ---> 5c02e471f3c3 Step 4/8 : WORKDIR /home/virt-api ---> Using cache ---> aee004e37b06 Step 5/8 : USER 1001 ---> Using cache ---> 503600570331 Step 6/8 : COPY virt-api /usr/bin/virt-api ---> 3a33a5551293 Removing intermediate container ae624ad81bb7 Step 7/8 : ENTRYPOINT /usr/bin/virt-api ---> Running in 27dad6628ab0 ---> c70a6c404265 Removing intermediate container 27dad6628ab0 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-api" '' ---> Running in 7798610c7a77 ---> fab3a4f113f4 Removing intermediate container 7798610c7a77 Successfully built fab3a4f113f4 Sending build context to Docker daemon 4.096 kB Step 1/7 : FROM fedora:27 ---> 9110ae7f579f Step 2/7 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b730b4ed65df Step 3/7 : ENV container docker ---> Using cache ---> f899e4585fe3 Step 4/7 : RUN mkdir -p /images/custom /images/alpine && truncate -s 64M /images/custom/disk.img && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/alpine/disk.img ---> Using cache ---> 82e9b97ea0e3 Step 5/7 : ADD entrypoint.sh / ---> Using cache ---> 239b6b287445 Step 6/7 : CMD /entrypoint.sh ---> Using cache ---> 037a337007e7 Step 7/7 : LABEL "disks-images-provider" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 0b0dc36413d9 Successfully built 0b0dc36413d9 Sending build context to Docker daemon 2.56 kB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b730b4ed65df Step 3/5 : ENV container docker ---> Using cache ---> f899e4585fe3 Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> 98d448efbabd Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "vm-killer" '' ---> Using cache ---> f88137917d15 Successfully built f88137917d15 Sending build context to Docker daemon 5.12 kB Step 1/7 : FROM debian:sid ---> 496290160351 Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> 3b36b527fef8 Step 3/7 : ENV container docker ---> Using cache ---> b3ada414d649 Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> 337be6171fcb Step 5/7 : ADD entry-point.sh / ---> Using cache ---> a98a961fa5a1 Step 6/7 : CMD /entry-point.sh ---> Using cache ---> 19baf5d1aab8 Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "registry-disk-v1alpha" '' ---> Using cache ---> acb3024d4b04 Successfully built acb3024d4b04 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:34244/kubevirt/registry-disk-v1alpha:devel ---> acb3024d4b04 Step 2/4 : MAINTAINER "David Vossel" \ ---> Using cache ---> db299d3a78fb Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Using cache ---> 09cb5a0563dc Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 3af24e0ca6c0 Successfully built 3af24e0ca6c0 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:34244/kubevirt/registry-disk-v1alpha:devel ---> acb3024d4b04 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 393e7f09838e Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2 ---> Using cache ---> 73632b993cef Step 4/4 : LABEL 
"fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 800e4dcf561c Successfully built 800e4dcf561c Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:34244/kubevirt/registry-disk-v1alpha:devel ---> acb3024d4b04 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 393e7f09838e Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso ---> Using cache ---> b36f5acbcad0 Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> f670a320082e Successfully built f670a320082e Sending build context to Docker daemon 34.04 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b730b4ed65df Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl ---> Using cache ---> 4f94005a8ef0 Step 4/8 : WORKDIR /home/virtctl ---> Using cache ---> 78481d3ffb57 Step 5/8 : USER 1001 ---> Using cache ---> 05fee420f2cf Step 6/8 : COPY subresource-access-test /subresource-access-test ---> d1f0bbfc20ed Removing intermediate container 3c810a6f4bbb Step 7/8 : ENTRYPOINT /subresource-access-test ---> Running in 0694e84d687f ---> 5336f42332cd Removing intermediate container 0694e84d687f Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "subresource-access-test" '' ---> Running in f4ba906c13a7 ---> d8b3f743b395 Removing intermediate container f4ba906c13a7 Successfully built d8b3f743b395 Sending build context to Docker daemon 3.072 kB Step 1/9 : FROM fedora:27 ---> 9110ae7f579f Step 2/9 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b730b4ed65df Step 3/9 : ENV container docker ---> Using cache ---> f899e4585fe3 Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all ---> Using cache ---> 8e4b2ebac4a7 Step 5/9 : ENV GIMME_GO_VERSION 1.9.2 ---> Using cache ---> 88f79aa9e9f7 Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> 87d2f8a07b0b Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> ed0543d85229 Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli ---> Using cache ---> b34d756315a5 Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "winrmcli" '' ---> Using cache ---> c5bf154b3fb5 Successfully built c5bf154b3fb5 Sending build context to Docker daemon 34.49 MB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> b730b4ed65df Step 3/5 : COPY example-hook-sidecar /example-hook-sidecar ---> f3644dba6c3a Removing intermediate container 8185a1d9df65 Step 4/5 : ENTRYPOINT /example-hook-sidecar ---> Running in 530ceed8b675 ---> 36d55c8efe5e Removing intermediate container 530ceed8b675 Step 5/5 : LABEL "example-hook-sidecar" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Running in 185761ece6e1 ---> 299ac66fe2cf Removing intermediate container 185761ece6e1 Successfully built 299ac66fe2cf hack/build-docker.sh push The push refers to a repository [localhost:34244/kubevirt/virt-controller] a32b362e2091: Preparing 2f3c351af6e6: Preparing 39bae602f753: Preparing 2f3c351af6e6: Pushed a32b362e2091: Pushed 39bae602f753: Pushed devel: digest: sha256:dc7d6df34159cfeba8fef7ce14d9f15d0659deff2e0d4ab7053a3acfec1daa63 size: 948 The push 
refers to a repository [localhost:34244/kubevirt/virt-launcher] 2226adfc6edb: Preparing 9efa82e5f605: Preparing 279ff92b69a9: Preparing 4008b48d369f: Preparing 0d84393a327b: Preparing 375f4f593077: Preparing 9e20b26113ea: Preparing a1a99db27cd1: Preparing 9e20b26113ea: Waiting ec5be2616f4d: Preparing ffcfbc9458ac: Preparing ec5be2616f4d: Waiting 68e0ce966da1: Preparing 39bae602f753: Preparing ffcfbc9458ac: Waiting 39bae602f753: Waiting 9efa82e5f605: Pushed 2226adfc6edb: Pushed 4008b48d369f: Pushed 9e20b26113ea: Pushed a1a99db27cd1: Pushed ec5be2616f4d: Pushed ffcfbc9458ac: Pushed 39bae602f753: Mounted from kubevirt/virt-controller 279ff92b69a9: Pushed 375f4f593077: Pushed 0d84393a327b: Pushed 68e0ce966da1: Pushed devel: digest: sha256:1e610f7f40629d7c8bddb910f17ccf2509431788618ecceccd0c75de9de29db2 size: 2828 The push refers to a repository [localhost:34244/kubevirt/virt-handler] 566fc4430b87: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-launcher 566fc4430b87: Pushed devel: digest: sha256:daec4618f6709f3d7b70df028b7b6b12540d4687b1cab2d1514d237b618293ce size: 741 The push refers to a repository [localhost:34244/kubevirt/virt-api] c3772a11ea84: Preparing cb5027ae4f61: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-handler cb5027ae4f61: Pushed c3772a11ea84: Pushed devel: digest: sha256:58e74b2894dd72a801f66b6abd8ad0d00bdf3370165ce3f7528d2bc3145c480d size: 948 The push refers to a repository [localhost:34244/kubevirt/disks-images-provider] 5dd5eb5c1118: Preparing 2c38d01b8132: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-api 5dd5eb5c1118: Pushed 2c38d01b8132: Pushed devel: digest: sha256:db0d5a9739ac9a06ddd214e9a4b6c5a119717a465754ed1933bbf62ea53c986a size: 948 The push refers to a repository [localhost:34244/kubevirt/vm-killer] 9470a2bb70a6: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/disks-images-provider 9470a2bb70a6: Pushed devel: digest: sha256:e332efb784ec9f3e7327ae6aaabe27e4cae4e4b3ce4d05362c4bc9f63c14b1fc size: 740 The push refers to a repository [localhost:34244/kubevirt/registry-disk-v1alpha] bfd12fa374fa: Preparing 18ac8ad2aee9: Preparing 132d61a890c5: Preparing bfd12fa374fa: Pushed 18ac8ad2aee9: Pushed 132d61a890c5: Pushed devel: digest: sha256:ece43221be6d43a2437c3577d2eb6badeb9f2d0023257811e2fcd5b6c7638410 size: 948 The push refers to a repository [localhost:34244/kubevirt/cirros-registry-disk-demo] 9c41418164fc: Preparing bfd12fa374fa: Preparing 18ac8ad2aee9: Preparing 132d61a890c5: Preparing 18ac8ad2aee9: Mounted from kubevirt/registry-disk-v1alpha 132d61a890c5: Mounted from kubevirt/registry-disk-v1alpha bfd12fa374fa: Mounted from kubevirt/registry-disk-v1alpha 9c41418164fc: Pushed devel: digest: sha256:47a787d6b2baf5f696266941d963477115d09724d615359cfbd52a4d0bb18859 size: 1160 The push refers to a repository [localhost:34244/kubevirt/fedora-cloud-registry-disk-demo] 6496bf842226: Preparing bfd12fa374fa: Preparing 18ac8ad2aee9: Preparing 132d61a890c5: Preparing 18ac8ad2aee9: Mounted from kubevirt/cirros-registry-disk-demo 132d61a890c5: Mounted from kubevirt/cirros-registry-disk-demo bfd12fa374fa: Mounted from kubevirt/cirros-registry-disk-demo 6496bf842226: Pushed devel: digest: sha256:64ad85d26676696b49e259a87e9691bc0e49d262dc40cf77e983ed7a03a351d5 size: 1161 The push refers to a repository [localhost:34244/kubevirt/alpine-registry-disk-demo] 930b97839a9b: Preparing bfd12fa374fa: Preparing 18ac8ad2aee9: Preparing 132d61a890c5: Preparing bfd12fa374fa: Mounted from 
kubevirt/fedora-cloud-registry-disk-demo 18ac8ad2aee9: Mounted from kubevirt/fedora-cloud-registry-disk-demo 132d61a890c5: Mounted from kubevirt/fedora-cloud-registry-disk-demo 930b97839a9b: Pushed devel: digest: sha256:58d1952ddfe0fa4ad8b764845a07d6bcf66f1717a68e8d498aea1fafa2c1c74c size: 1160 The push refers to a repository [localhost:34244/kubevirt/subresource-access-test] 8519f3d9c9a1: Preparing 00a62312fc63: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/vm-killer 00a62312fc63: Pushed 8519f3d9c9a1: Pushed devel: digest: sha256:6592fe5c59e469384a8e172f0257d60d8775354b00994502dbede26a3f8f93fd size: 948 The push refers to a repository [localhost:34244/kubevirt/winrmcli] 594e0d4b7316: Preparing 4d98f841ba37: Preparing e8c601393c84: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/subresource-access-test 594e0d4b7316: Pushed e8c601393c84: Pushed 4d98f841ba37: Pushed devel: digest: sha256:f4d393938e103ed3e5222a1000af79b771bd52efbb6be9f7bad8389531a278ca size: 1165 The push refers to a repository [localhost:34244/kubevirt/example-hook-sidecar] d09d01b6179a: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/winrmcli d09d01b6179a: Pushed devel: digest: sha256:b2656225067fbc8e2a72605a28f88a7c33e2c170c172983bbbb0831eb5221597 size: 740 make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' Done ./cluster/clean.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-44-g3d60f85 ++ KUBEVIRT_VERSION=v0.7.0-44-g3d60f85 + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:0977f1c716862710c8324798b95802e11a149f4532e33be20dd70877fe8f5332 ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock 
kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:34244/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Cleaning up ...' Cleaning up ... + cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers + grep foregroundDeleteVirtualMachine + read p error: the server doesn't have a resource type "vmis" + _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0 No resources found + namespaces=(default ${namespace}) + for i in '${namespaces[@]}' + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete deployment -l kubevirt.io No resources found + _kubectl -n default delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rs -l kubevirt.io No resources found + _kubectl -n default delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete services -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No 
resources found + _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n default delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete secrets -l kubevirt.io No resources found + _kubectl -n default delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pv -l kubevirt.io No resources found + _kubectl -n default delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pvc -l kubevirt.io No resources found + _kubectl -n default delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete ds -l kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n default delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pods -l kubevirt.io No resources found + _kubectl -n default delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n default delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rolebinding -l kubevirt.io No resources found + _kubectl -n default delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete roles -l kubevirt.io No resources found + _kubectl -n default delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterroles -l kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + for i in '${namespaces[@]}' + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete 
apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete services -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n kube-system delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete secrets -l kubevirt.io No resources found + _kubectl -n kube-system delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pv -l kubevirt.io No resources found + _kubectl -n kube-system delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pvc -l kubevirt.io No resources found + _kubectl -n kube-system delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete ds -l kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n kube-system delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pods -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete roles -l kubevirt.io No resources found + _kubectl -n kube-system delete 
clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterroles -l kubevirt.io No resources found + _kubectl -n kube-system delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + sleep 2 + echo Done Done ./cluster/deploy.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-44-g3d60f85 ++ KUBEVIRT_VERSION=v0.7.0-44-g3d60f85 + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:0977f1c716862710c8324798b95802e11a149f4532e33be20dd70877fe8f5332 ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test 
cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:34244/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Deploying ...' Deploying ... + [[ -z openshift-3.10-release ]] + [[ openshift-3.10-release =~ .*-dev ]] + [[ openshift-3.10-release =~ .*-release ]] + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]] + continue + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]] + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml clusterrole.rbac.authorization.k8s.io "kubevirt.io:admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:edit" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:view" created serviceaccount "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver-auth-delegator" created rolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created role.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-controller" created serviceaccount "kubevirt-controller" created serviceaccount "kubevirt-privileged" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller-cluster-admin" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-privileged-cluster-admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:default" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt.io:default" created service "virt-api" created deployment.extensions "virt-api" created deployment.extensions "virt-controller" created daemonset.extensions "virt-handler" created customresourcedefinition.apiextensions.k8s.io 
"virtualmachineinstances.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancereplicasets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancepresets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachines.kubevirt.io" created + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R persistentvolumeclaim "disk-alpine" created persistentvolume "host-path-disk-alpine" created persistentvolumeclaim "disk-custom" created persistentvolume "host-path-disk-custom" created daemonset.extensions "disks-images-provider" created serviceaccount "kubevirt-testing" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-testing-cluster-admin" created + [[ os-3.10.0 =~ os-* ]] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"] + _kubectl adm policy add-scc-to-user privileged admin + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged admin scc "privileged" added to: ["admin"] + echo Done Done + namespaces=(kube-system default) + [[ kube-system != \k\u\b\e\-\s\y\s\t\e\m ]] + timeout=300 + sample=30 + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n 'disks-images-provider-5hjmw 0/1 ContainerCreating 0 3s disks-images-provider-mdmdp 0/1 ContainerCreating 0 4s virt-api-7d79764579-d7nrn 0/1 ContainerCreating 0 6s virt-api-7d79764579-kgq4g 0/1 ContainerCreating 0 5s virt-controller-7d57d96b65-v5pw7 0/1 ContainerCreating 0 5s virt-handler-8s468 0/1 ContainerCreating 0 6s virt-handler-g269k 0/1 ContainerCreating 0 6s' ']' + echo 'Waiting 
Waiting for kubevirt pods to enter the Running state ...
+ kubectl get pods -n kube-system --no-headers
+ cluster/kubectl.sh get pods -n kube-system --no-headers
+ grep -v Running
disks-images-provider-5hjmw 0/1 ContainerCreating 0 4s
disks-images-provider-mdmdp 0/1 ContainerCreating 0 5s
virt-api-7d79764579-kgq4g 0/1 ContainerCreating 0 6s
virt-controller-7d57d96b65-v5pw7 0/1 ContainerCreating 0 6s
virt-handler-8s468 0/1 ContainerCreating 0 7s
virt-handler-g269k 0/1 ContainerCreating 0 7s
+ sleep 30
+ current_time=30
+ '[' 30 -gt 300 ']'
++ kubectl get pods -n kube-system --no-headers
++ grep -v Running
++ cluster/kubectl.sh get pods -n kube-system --no-headers
+ '[' -n '' ']'
+ current_time=0
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ '[' -n false ']'
+ echo 'Waiting for KubeVirt containers to become ready ...'
Waiting for KubeVirt containers to become ready ...
+ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ grep false
false
+ sleep 30
+ current_time=30
+ '[' 30 -gt 300 ']'
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ '[' -n '' ']'
+ kubectl get pods -n kube-system
+ cluster/kubectl.sh get pods -n kube-system
NAME                               READY     STATUS    RESTARTS   AGE
disks-images-provider-5hjmw        1/1       Running   0          1m
disks-images-provider-mdmdp        1/1       Running   0          1m
master-api-node01                  1/1       Running   1          24d
master-controllers-node01          1/1       Running   1          24d
master-etcd-node01                 1/1       Running   1          24d
virt-api-7d79764579-d7nrn          1/1       Running   0          1m
virt-api-7d79764579-kgq4g          1/1       Running   0          1m
virt-controller-7d57d96b65-jrcdt   1/1       Running   0          1m
virt-controller-7d57d96b65-v5pw7   1/1       Running   0          1m
virt-handler-8s468                 1/1       Running   0          1m
virt-handler-g269k                 1/1       Running   0          1m
+ for i in '${namespaces[@]}'
+ current_time=0
++ kubectl get pods -n default --no-headers
++ cluster/kubectl.sh get pods -n default --no-headers
++ grep -v Running
+ '[' -n '' ']'
+ current_time=0
++ kubectl get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
++ cluster/kubectl.sh get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ '[' -n '' ']'
+ kubectl get pods -n default
+ cluster/kubectl.sh get pods -n default
NAME                       READY     STATUS    RESTARTS   AGE
docker-registry-1-tqlsm    1/1       Running   1          24d
registry-console-1-bhtqz   1/1       Running   2          24d
router-1-r2xxq             1/1       Running   1          24d
+ kubectl version
+ cluster/kubectl.sh version
oc v3.10.0-rc.0+c20e215
kubernetes v1.10.0+b81c8f8
features: Basic-Auth GSSAPI Kerberos SPNEGO
Server https://127.0.0.1:34241
openshift v3.10.0-rc.0+c20e215
kubernetes v1.10.0+b81c8f8
+ ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml'
+ [[ -d /home/nfs/images/windows2016 ]]
+ [[ openshift-3.10-release =~ windows.* ]]
+ FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml'
+ make functest
hack/dockerized
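Aside (not part of the captured output): the readiness check traced above is a generic poll-until-timeout pattern: first wait until no pod reports a non-Running phase, then wait until no container reports ready=false. A minimal, self-contained sketch of that pattern follows; the namespace, 300s timeout and 30s sample interval mirror the values in the trace, but the script itself is illustrative and not taken from the CI scripts.

#!/bin/bash
# Illustrative sketch only: wait for all pods in a namespace to be Running and ready.
namespace=kube-system   # values mirror the trace above
timeout=300
sample=30
current_time=0
# Phase 1: no pod may be in a non-Running phase.
while [ -n "$(kubectl get pods -n "$namespace" --no-headers | grep -v Running)" ]; do
    echo "Waiting for $namespace pods to enter the Running state ..."
    sleep $sample
    current_time=$((current_time + sample))
    [ $current_time -gt $timeout ] && exit 1
done
# Phase 2: every container in every pod must report ready.
current_time=0
while kubectl get pods -n "$namespace" \
        -ocustom-columns=status:status.containerStatuses[*].ready --no-headers | grep -q false; do
    echo "Waiting for containers in $namespace to become ready ..."
    sleep $sample
    current_time=$((current_time + sample))
    [ $current_time -gt $timeout ] && exit 1
done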
"hack/build-func-tests.sh" sha256:380a1cbdbe840bfd69bc431427e8cd6a0b9dd9815c87fb88c0d177c6886cc53b go version go1.10 linux/amd64 go version go1.10 linux/amd64 Compiling tests... compiled tests.test hack/functests.sh Running Suite: Tests Suite ========================== Random Seed: 1531740357 Will run 140 of 140 specs volumedisk0 compute • [SLOW TEST:39.655 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with 3 CPU cores /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:56 should report 3 cpu cores under guest OS /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:62 ------------------------------ S [SKIPPING] [0.225 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108 should consume hugepages /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 hugepages-2Mi [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 No node with hugepages hugepages-2Mi capacity /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:160 ------------------------------ S [SKIPPING] [0.167 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108 should consume hugepages /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 hugepages-1Gi [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 No node with hugepages hugepages-1Gi capacity /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:160 ------------------------------ • ------------------------------ • [SLOW TEST:79.979 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 with CPU spec /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238 when CPU model defined /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:277 should report defined CPU model /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:278 ------------------------------ • [SLOW TEST:82.035 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 with CPU spec /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238 when CPU model not defined /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:305 should report CPU model from libvirt capabilities /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:306 ------------------------------ Pod name: disks-images-provider-5hjmw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-mdmdp Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-d7nrn Pod phase: Running level=info timestamp=2018-07-16T11:28:59.195413Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 
11:29:00 http: TLS handshake error from 10.129.0.1:44196: EOF 2018/07/16 11:29:10 http: TLS handshake error from 10.129.0.1:44202: EOF level=info timestamp=2018-07-16T11:29:10.157788Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:29:12.982783Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:29:13.004424Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:29:13.021724Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:29:13.339404Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:29:13.508069Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:29:19.458283Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 11:29:20 http: TLS handshake error from 10.129.0.1:44208: EOF level=info timestamp=2018-07-16T11:29:21.042642Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 11:29:30 http: TLS handshake error from 10.129.0.1:44216: EOF level=info timestamp=2018-07-16T11:29:31.216750Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 11:29:40 http: TLS handshake error from 10.129.0.1:44222: EOF Pod name: virt-api-7d79764579-kgq4g Pod phase: Running 2018/07/16 11:28:31 http: TLS handshake error from 10.128.0.1:41670: EOF 2018/07/16 11:28:42 http: TLS handshake error from 10.128.0.1:41726: EOF level=info timestamp=2018-07-16T11:28:43.982521Z pos=subresource.go:78 component=virt-api msg="Websocket connection upgraded" 2018/07/16 11:28:51 http: TLS handshake error from 10.128.0.1:41782: EOF 2018/07/16 11:29:01 http: TLS handshake error from 10.128.0.1:41826: EOF level=info timestamp=2018-07-16T11:29:03.036421Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/16 11:29:11 http: TLS handshake error from 10.128.0.1:41880: EOF 2018/07/16 11:29:21 http: TLS handshake error from 10.128.0.1:41932: EOF level=error timestamp=2018-07-16T11:29:24.566179Z pos=subresource.go:88 component=virt-api msg= 2018/07/16 11:29:24 http: response.WriteHeader on hijacked connection level=error timestamp=2018-07-16T11:29:24.568656Z pos=subresource.go:100 
component=virt-api reason="read tcp 10.128.0.23:8443->10.128.0.1:56048: use of closed network connection" msg="error ecountered reading from websocket stream" level=info timestamp=2018-07-16T11:29:24.573318Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmivd65z/console proto=HTTP/1.1 statusCode=200 contentLength=0 2018/07/16 11:29:31 http: TLS handshake error from 10.128.0.1:41982: EOF level=info timestamp=2018-07-16T11:29:33.015244Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/16 11:29:41 http: TLS handshake error from 10.128.0.1:42038: EOF Pod name: virt-controller-7d57d96b65-jrcdt Pod phase: Running level=info timestamp=2018-07-16T11:26:41.292088Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmisrq4w kind= uid=1cee13dc-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:27:00.571695Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmimxjcg kind= uid=286e3326-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:27:00.572267Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmimxjcg kind= uid=286e3326-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:27:00.886320Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmimxjcg\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmimxjcg" level=info timestamp=2018-07-16T11:27:00.978311Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmimxjcg\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmimxjcg" level=info timestamp=2018-07-16T11:28:01.536887Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9kh9f kind= uid=4cc4bf2b-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:28:01.537534Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9kh9f kind= uid=4cc4bf2b-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:28:01.733142Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kh9f\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kh9f" level=info timestamp=2018-07-16T11:28:21.234633Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:28:21.235038Z pos=preset.go:165 component=virt-controller 
service=http namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:28:21.608854Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmivd65z\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmivd65z" level=info timestamp=2018-07-16T11:29:24.217535Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9q55w kind= uid=7e0c6c72-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:29:24.218154Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9q55w kind= uid=7e0c6c72-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:29:24.414998Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9q55w\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9q55w" level=info timestamp=2018-07-16T11:29:24.465147Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9q55w\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9q55w" Pod name: virt-controller-7d57d96b65-v5pw7 Pod phase: Running level=info timestamp=2018-07-16T11:24:12.341078Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-8s468 Pod phase: Running level=info timestamp=2018-07-16T11:29:24.258260Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-16T11:29:24.258841Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-16T11:29:24.260226Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-16T11:29:24.273326Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=error timestamp=2018-07-16T11:29:24.384791Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind= uid=4cc4bf2b-88eb-11e8-bd4c-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kh9f\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi9kh9f, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4cc4bf2b-88eb-11e8-bd4c-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed." 
level=info timestamp=2018-07-16T11:29:24.386230Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kh9f\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi9kh9f, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4cc4bf2b-88eb-11e8-bd4c-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kh9f" level=info timestamp=2018-07-16T11:29:24.386658Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.387788Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.387859Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.392411Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.392609Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.448426Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.449195Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.553001Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.553322Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-g269k Pod phase: Running level=info timestamp=2018-07-16T11:26:17.459741Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi8pp9d kind= uid=0443ae00-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-16T11:26:17.497869Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi8pp9d kind= uid=0443ae00-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:26:39.453357Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmi8pp9d kind= uid=0443ae00-88eb-11e8-bd4c-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-16T11:26:39.453872Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmi8pp9d kind= uid=0443ae00-88eb-11e8-bd4c-525500d15501 msg="Processing shutdown." 
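Aside (not part of the captured output): the StorageError entries above appear to come from virt-handler updating the status of a VMI that has already been deleted, so the UID it cached ("UID in precondition") no longer matches any object in the API server ("UID in object meta" is empty); the handler re-enqueues and the next sync observes the deletion. A hypothetical way to confirm the same situation by hand, not executed in this run and assuming the test namespace still exists:

# Hypothetical check only: if the VMI is gone, the cached UID can no longer match.
kubectl get virtualmachineinstances testvmi9kh9f -n kubevirt-test-default \
    -o jsonpath='{.metadata.uid}' \
  || echo "VMI already deleted; the UID held by virt-handler is stale"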
level=info timestamp=2018-07-16T11:26:39.456157Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmi8pp9d kind= uid=0443ae00-88eb-11e8-bd4c-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmi8pp9d" level=info timestamp=2018-07-16T11:26:39.693737Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-16T11:26:39.702915Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi8pp9d kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-16T11:26:39.705601Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi8pp9d kind= uid=0443ae00-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:26:39.707498Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-16T11:26:39.707917Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi8pp9d kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:26:39.709827Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi8pp9d kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:40.867413Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi9q55w kind= uid=7e0c6c72-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=error timestamp=2018-07-16T11:29:40.897140Z pos=vm.go:397 component=virt-handler namespace=kubevirt-test-default name=testvmi9q55w kind= uid=7e0c6c72-88eb-11e8-bd4c-525500d15501 reason="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="Synchronizing the VirtualMachineInstance failed." level=info timestamp=2018-07-16T11:29:40.940211Z pos=vm.go:251 component=virt-handler reason="server error. 
command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmi9q55w" level=info timestamp=2018-07-16T11:29:40.940874Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi9q55w kind= uid=7e0c6c72-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" Pod name: virt-launcher-testvmi9q55w-6mhrk Pod phase: Running level=info timestamp=2018-07-16T11:29:30.333243Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-07-16T11:29:30.335156Z pos=libvirt.go:256 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" level=info timestamp=2018-07-16T11:29:40.345589Z pos=libvirt.go:271 component=virt-launcher msg="Connected to libvirt daemon" level=info timestamp=2018-07-16T11:29:40.405488Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmi9q55w" level=info timestamp=2018-07-16T11:29:40.407548Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback" level=info timestamp=2018-07-16T11:29:40.408153Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready" level=error timestamp=2018-07-16T11:29:40.889131Z pos=manager.go:159 component=virt-launcher namespace=kubevirt-test-default name=testvmi9q55w kind= uid=7e0c6c72-88eb-11e8-bd4c-525500d15501 reason="virError(Code=0, Domain=0, Message='Missing error')" msg="Getting the domain failed." level=error timestamp=2018-07-16T11:29:40.889592Z pos=server.go:68 component=virt-launcher namespace=kubevirt-test-default name=testvmi9q55w kind= uid=7e0c6c72-88eb-11e8-bd4c-525500d15501 reason="virError(Code=0, Domain=0, Message='Missing error')" msg="Failed to sync vmi" level=info timestamp=2018-07-16T11:29:40.972080Z pos=cloud-init.go:254 component=virt-launcher msg="generated nocloud iso file /var/run/libvirt/kubevirt-ephemeral-disk/cloud-init-data/kubevirt-test-default/testvmi9q55w/noCloud.iso" level=error timestamp=2018-07-16T11:29:41.029274Z pos=common.go:126 component=virt-launcher msg="updated MAC for interface: eth0 - 0a:58:0a:9a:a5:0d" level=info timestamp=2018-07-16T11:29:41.044040Z pos=converter.go:729 component=virt-launcher msg="Found nameservers in /etc/resolv.conf: \ufffd\ufffdBf" level=info timestamp=2018-07-16T11:29:41.044439Z pos=converter.go:730 component=virt-launcher msg="Found search domains in /etc/resolv.conf: kubevirt-test-default.svc.cluster.local svc.cluster.local cluster.local" level=info timestamp=2018-07-16T11:29:41.044733Z pos=dhcp.go:62 component=virt-launcher msg="Starting SingleClientDHCPServer" level=info timestamp=2018-07-16T11:29:41.085783Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-16T11:29:41.096455Z pos=manager.go:157 component=virt-launcher namespace=kubevirt-test-default name=testvmi9q55w kind= uid=7e0c6c72-88eb-11e8-bd4c-525500d15501 msg="Domain defined." • Failure [101.676 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 New VirtualMachineInstance with all supported drives /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:326 should have all the device nodes [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:349 Unexpected Warning event received. 
Expected : Warning not to equal : Warning /root/go/src/kubevirt.io/kubevirt/tests/utils.go:245 ------------------------------ level=info timestamp=2018-07-16T11:29:24.810255Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmi9q55w-6mhrk" level=info timestamp=2018-07-16T11:29:41.253631Z pos=utils.go:243 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmi9q55w-6mhrk" level=error timestamp=2018-07-16T11:29:41.336259Z pos=utils.go:241 component=tests reason="unexpected warning event received" msg="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" level=info timestamp=2018-07-16T11:31:05.078020Z pos=vmi_configuration_test.go:363 component=tests namespace=kubevirt-test-default name=testvmi9q55w kind=VirtualMachineInstance uid=7e0c6c72-88eb-11e8-bd4c-525500d15501 msg="[{1 ls /dev/sda /dev/vda /dev/vdb [/dev/sda /dev/vda /dev/vdb]}]" • [SLOW TEST:38.073 seconds] LeaderElection /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:43 Start a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:53 when the controller pod is not running /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:54 should success /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:55 ------------------------------ ••• ------------------------------ • [SLOW TEST:8.602 seconds] Templates /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:42 Launching VMI from VM Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:60 with given Fedora Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:193 with given VM JSON from the Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:152 with given VM from the VM JSON /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:158 with given VMI from the VM /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:163 should succeed to terminate the VMI using oc-patch command /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:166 ------------------------------ Pod name: disks-images-provider-5hjmw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-mdmdp Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-d7nrn Pod phase: Running 2018/07/16 11:32:51 http: TLS handshake error from 10.129.0.1:44344: EOF level=info timestamp=2018-07-16T11:32:52.151714Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:32:52.292367Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:32:52.743407Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:32:55.612448Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-16T11:32:55.706594Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json 
proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-16T11:32:57.813150Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 11:33:00 http: TLS handshake error from 10.129.0.1:44350: EOF level=info timestamp=2018-07-16T11:33:08.609602Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 11:33:10 http: TLS handshake error from 10.129.0.1:44356: EOF level=info timestamp=2018-07-16T11:33:19.391589Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 11:33:20 http: TLS handshake error from 10.129.0.1:44362: EOF level=info timestamp=2018-07-16T11:33:22.648968Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:33:22.796823Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:33:23.611830Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-kgq4g Pod phase: Running 2018/07/16 11:31:31 http: TLS handshake error from 10.128.0.1:42618: EOF 2018/07/16 11:31:41 http: TLS handshake error from 10.128.0.1:42672: EOF 2018/07/16 11:31:51 http: TLS handshake error from 10.128.0.1:42722: EOF 2018/07/16 11:32:01 http: TLS handshake error from 10.128.0.1:42772: EOF level=info timestamp=2018-07-16T11:32:02.804925Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/16 11:32:11 http: TLS handshake error from 10.128.0.1:42826: EOF 2018/07/16 11:32:21 http: TLS handshake error from 10.128.0.1:42878: EOF 2018/07/16 11:32:31 http: TLS handshake error from 10.128.0.1:42928: EOF 2018/07/16 11:32:41 http: TLS handshake error from 10.128.0.1:42982: EOF level=info timestamp=2018-07-16T11:32:46.278418Z pos=subresource.go:78 component=virt-api msg="Websocket connection upgraded" 2018/07/16 11:32:51 http: TLS handshake error from 10.128.0.1:43040: EOF 2018/07/16 11:33:01 http: TLS handshake error from 10.128.0.1:43090: EOF level=info timestamp=2018-07-16T11:33:02.815861Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/16 11:33:11 http: TLS handshake error from 10.128.0.1:43144: EOF 2018/07/16 11:33:21 http: TLS handshake error from 10.128.0.1:43196: EOF Pod name: virt-controller-7d57d96b65-p6vnn Pod phase: Running level=info timestamp=2018-07-16T11:31:09.354793Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-v5pw7 Pod phase: Running level=info timestamp=2018-07-16T11:32:00.703838Z pos=vm.go:262 component=virt-controller service=http msg="vmi is nil" level=info 
timestamp=2018-07-16T11:32:00.724162Z pos=vm.go:135 component=virt-controller service=http namespace=default name=testvm kind= uid=d7a3c812-88eb-11e8-bd4c-525500d15501 msg="Started processing VM" level=info timestamp=2018-07-16T11:32:00.724495Z pos=vm.go:186 component=virt-controller service=http namespace=default name=testvm kind= uid=d7a3c812-88eb-11e8-bd4c-525500d15501 msg="Creating or the VirtualMachineInstance: false" level=info timestamp=2018-07-16T11:32:00.724582Z pos=vm.go:262 component=virt-controller service=http msg="vmi is nil" level=info timestamp=2018-07-16T11:32:02.302644Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:32:02.303387Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:32:02.328270Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:32:02.328801Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:32:02.366195Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:32:02.366317Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:32:02.392902Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:32:02.393037Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:32:02.505124Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9cn2k\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9cn2k" level=info timestamp=2018-07-16T11:32:02.564903Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmigwq5s\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmigwq5s" level=info timestamp=2018-07-16T11:32:02.944307Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmittxxb\": the object has been modified; please apply your changes to the latest 
version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmittxxb" Pod name: virt-handler-8s468 Pod phase: Running level=info timestamp=2018-07-16T11:29:24.258260Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-16T11:29:24.258841Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-16T11:29:24.260226Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-16T11:29:24.273326Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=error timestamp=2018-07-16T11:29:24.384791Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind= uid=4cc4bf2b-88eb-11e8-bd4c-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kh9f\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi9kh9f, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4cc4bf2b-88eb-11e8-bd4c-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-16T11:29:24.386230Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kh9f\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi9kh9f, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4cc4bf2b-88eb-11e8-bd4c-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kh9f" level=info timestamp=2018-07-16T11:29:24.386658Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.387788Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.387859Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.392411Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.392609Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.448426Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-16T11:29:24.449195Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.553001Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.553322Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-g269k Pod phase: Running level=info timestamp=2018-07-16T11:32:41.121314Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:32:41.121489Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-16T11:32:41.243907Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-16T11:32:41.279379Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:32:41.296430Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="No update processing required" level=info timestamp=2018-07-16T11:32:41.463498Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:32:41.530680Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:32:41.628371Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-16T11:32:42.068066Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=error timestamp=2018-07-16T11:32:42.177338Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmittxxb\": the object has been modified; please apply your changes to the latest version and try again" msg="Updating the VirtualMachineInstance status failed." 
level=info timestamp=2018-07-16T11:32:42.889502Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmittxxb\": the object has been modified; please apply your changes to the latest version and try again" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmittxxb" level=info timestamp=2018-07-16T11:32:42.889679Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-16T11:32:43.271462Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:32:43.271663Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-16T11:32:44.514382Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmi9cn2k-4w22z Pod phase: Running level=info timestamp=2018-07-16T11:32:27.323385Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-16T11:32:28.088316Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-16T11:32:28.117543Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID d0d602ef-bb14-4b8d-b7e3-bbed1c16b68e" level=info timestamp=2018-07-16T11:32:28.125676Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-16T11:32:28.552635Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:29.157929Z pos=monitor.go:222 component=virt-launcher msg="Found PID for d0d602ef-bb14-4b8d-b7e3-bbed1c16b68e: 167" level=info timestamp=2018-07-16T11:32:29.269036Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-16T11:32:29.305392Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-16T11:32:29.317386Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:29.317584Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:29.416512Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:29.416652Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-16T11:32:29.465298Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:29.738333Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:38.681996Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmigwq5s-wl6rq Pod phase: Running level=info timestamp=2018-07-16T11:32:28.853277Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-16T11:32:28.958472Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID b29cac00-eefb-4918-ae0e-5ed003fe0554" level=info timestamp=2018-07-16T11:32:28.958945Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-16T11:32:29.153770Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:29.872579Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-16T11:32:29.908424Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-16T11:32:29.922014Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:29.922943Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:29.976287Z pos=monitor.go:222 component=virt-launcher msg="Found PID for b29cac00-eefb-4918-ae0e-5ed003fe0554: 172" level=info timestamp=2018-07-16T11:32:30.251516Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:30.251744Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-16T11:32:30.283625Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:34.144978Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:38.746966Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:41.125845Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmiksqn7-q9zkn Pod phase: Running level=info timestamp=2018-07-16T11:32:27.746414Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-16T11:32:28.838882Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-16T11:32:28.857677Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 1468f29e-25bd-4aa1-8234-be6ed406789e" level=info timestamp=2018-07-16T11:32:28.857991Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-16T11:32:28.909547Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:29.801522Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-16T11:32:29.872389Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 1468f29e-25bd-4aa1-8234-be6ed406789e: 175" level=info timestamp=2018-07-16T11:32:29.903285Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-16T11:32:29.910182Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:29.920402Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:31.909494Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:31.909724Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-16T11:32:31.979961Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:34.304921Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:41.673957Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmittxxb-tlhsx Pod phase: Running level=info timestamp=2018-07-16T11:32:34.211599Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-16T11:32:34.225560Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 883cc4fd-7f2a-437b-93a3-c2972ff4e7b0" level=info timestamp=2018-07-16T11:32:34.251386Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-16T11:32:35.262093Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 883cc4fd-7f2a-437b-93a3-c2972ff4e7b0: 181" level=info timestamp=2018-07-16T11:32:35.430150Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Domain started." 
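Aside (not part of the captured output): the virt-launcher entries above ("Detected domain with UUID ...", "Domain started.", "Found PID for ...") trace the libvirt domain lifecycle inside each launcher pod. For reference, a hypothetical way to cross-check that state by hand, assuming virsh is shipped in the launcher image; this command is not run by the job:

# Hypothetical debugging command only: list the libvirt domains known to the
# launcher pod of testvmittxxb.
cluster/kubectl.sh exec -n kubevirt-test-default virt-launcher-testvmittxxb-tlhsx -- virsh list --all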
level=info timestamp=2018-07-16T11:32:35.434577Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:37.215795Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:37.216567Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-16T11:32:37.256742Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:41.036406Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:41.036890Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-16T11:32:41.111476Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:41.245012Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:42.897120Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:43.282641Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" • Failure in Spec Setup (BeforeEach) [206.853 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48 should be able to reach [BeforeEach] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 the Inbound VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 Unexpected Warning event received. Expected : Warning not to equal : Warning /root/go/src/kubevirt.io/kubevirt/tests/utils.go:245 ------------------------------ level=info timestamp=2018-07-16T11:32:02.260105Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmiksqn7-q9zkn" level=info timestamp=2018-07-16T11:32:28.002627Z pos=utils.go:243 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmiksqn7-q9zkn" level=info timestamp=2018-07-16T11:32:38.169367Z pos=utils.go:243 component=tests msg="VirtualMachineInstance defined." level=info timestamp=2018-07-16T11:32:45.784378Z pos=utils.go:243 component=tests msg="VirtualMachineInstance started." level=info timestamp=2018-07-16T11:33:26.140071Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmi9cn2k-4w22z" level=error timestamp=2018-07-16T11:33:26.140312Z pos=utils.go:241 component=tests reason="unexpected warning event received" msg="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" level=info timestamp=2018-07-16T11:35:06.950892Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmigwq5s-wl6rq" level=info timestamp=2018-07-16T11:35:06.951070Z pos=utils.go:243 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmigwq5s-wl6rq" level=info timestamp=2018-07-16T11:35:06.951749Z pos=utils.go:243 component=tests msg="VirtualMachineInstance defined." level=info timestamp=2018-07-16T11:35:06.951928Z pos=utils.go:243 component=tests msg="VirtualMachineInstance started." 
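Aside (not part of the captured output): both failures reported so far are the test suite's event watcher (tests/utils.go:245) tripping on a Warning event emitted while virt-launcher synced the domain ("Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')"). A hypothetical triage step for someone reproducing this by hand, not executed by the job and assuming the test namespace has not yet been torn down:

# Hypothetical triage command only: list recent events in the test namespace and
# keep the Warning entries that would fail the watcher.
cluster/kubectl.sh get events -n kubevirt-test-default --sort-by=.lastTimestamp | grep Warning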
level=info timestamp=2018-07-16T11:35:17.351909Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmittxxb-tlhsx" level=info timestamp=2018-07-16T11:35:17.352148Z pos=utils.go:243 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmittxxb-tlhsx" level=info timestamp=2018-07-16T11:35:17.352759Z pos=utils.go:243 component=tests msg="VirtualMachineInstance defined." level=info timestamp=2018-07-16T11:35:17.352924Z pos=utils.go:243 component=tests msg="VirtualMachineInstance started." level=info timestamp=2018-07-16T11:35:28.823037Z pos=vmi_networking_test.go:112 component=tests msg="[{1 \r\n$ [$ ]} {3 screen -d -m nc -klp 1500 -e echo -e \"Hello World!\"\r\n$ [$ ]} {5 echo $?\r\n0\r\n [0]}]" • Pod name: disks-images-provider-5hjmw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-mdmdp Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-d7nrn Pod phase: Running 2018/07/16 11:38:00 http: TLS handshake error from 10.129.0.1:44542: EOF level=info timestamp=2018-07-16T11:38:02.419999Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-16T11:38:05.194559Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 11:38:10 http: TLS handshake error from 10.129.0.1:44548: EOF level=info timestamp=2018-07-16T11:38:15.270446Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:38:15.927398Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:38:15.980297Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:38:16.019754Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 11:38:20 http: TLS handshake error from 10.129.0.1:44554: EOF level=info timestamp=2018-07-16T11:38:25.336276Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:38:26.494134Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:38:26.531935Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T11:38:26.565903Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 
contentLength=136 2018/07/16 11:38:30 http: TLS handshake error from 10.129.0.1:44560: EOF level=info timestamp=2018-07-16T11:38:35.412382Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-kgq4g Pod phase: Running level=info timestamp=2018-07-16T11:36:33.083818Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/16 11:36:41 http: TLS handshake error from 10.128.0.1:44302: EOF 2018/07/16 11:36:51 http: TLS handshake error from 10.128.0.1:44352: EOF 2018/07/16 11:37:01 http: TLS handshake error from 10.128.0.1:44402: EOF level=info timestamp=2018-07-16T11:37:03.014335Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/16 11:37:11 http: TLS handshake error from 10.128.0.1:44456: EOF 2018/07/16 11:37:21 http: TLS handshake error from 10.128.0.1:44506: EOF 2018/07/16 11:37:31 http: TLS handshake error from 10.128.0.1:44556: EOF 2018/07/16 11:37:41 http: TLS handshake error from 10.128.0.1:44610: EOF 2018/07/16 11:37:51 http: TLS handshake error from 10.128.0.1:44660: EOF 2018/07/16 11:38:01 http: TLS handshake error from 10.128.0.1:44710: EOF 2018/07/16 11:38:11 http: TLS handshake error from 10.128.0.1:44764: EOF 2018/07/16 11:38:21 http: TLS handshake error from 10.128.0.1:44816: EOF 2018/07/16 11:38:31 http: TLS handshake error from 10.128.0.1:44866: EOF level=info timestamp=2018-07-16T11:38:32.924680Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 Pod name: virt-controller-7d57d96b65-p6vnn Pod phase: Running level=info timestamp=2018-07-16T11:31:09.354793Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-v5pw7 Pod phase: Running level=info timestamp=2018-07-16T11:32:00.703838Z pos=vm.go:262 component=virt-controller service=http msg="vmi is nil" level=info timestamp=2018-07-16T11:32:00.724162Z pos=vm.go:135 component=virt-controller service=http namespace=default name=testvm kind= uid=d7a3c812-88eb-11e8-bd4c-525500d15501 msg="Started processing VM" level=info timestamp=2018-07-16T11:32:00.724495Z pos=vm.go:186 component=virt-controller service=http namespace=default name=testvm kind= uid=d7a3c812-88eb-11e8-bd4c-525500d15501 msg="Creating or the VirtualMachineInstance: false" level=info timestamp=2018-07-16T11:32:00.724582Z pos=vm.go:262 component=virt-controller service=http msg="vmi is nil" level=info timestamp=2018-07-16T11:32:02.302644Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:32:02.303387Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:32:02.328270Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info 
timestamp=2018-07-16T11:32:02.328801Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:32:02.366195Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:32:02.366317Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:32:02.392902Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T11:32:02.393037Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T11:32:02.505124Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9cn2k\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi9cn2k" level=info timestamp=2018-07-16T11:32:02.564903Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmigwq5s\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmigwq5s" level=info timestamp=2018-07-16T11:32:02.944307Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmittxxb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmittxxb" Pod name: virt-handler-8s468 Pod phase: Running level=info timestamp=2018-07-16T11:29:24.258260Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-16T11:29:24.258841Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-16T11:29:24.260226Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Processing shutdown." 
level=info timestamp=2018-07-16T11:29:24.273326Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=error timestamp=2018-07-16T11:29:24.384791Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind= uid=4cc4bf2b-88eb-11e8-bd4c-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kh9f\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi9kh9f, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4cc4bf2b-88eb-11e8-bd4c-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-16T11:29:24.386230Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi9kh9f\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmi9kh9f, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 4cc4bf2b-88eb-11e8-bd4c-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmi9kh9f" level=info timestamp=2018-07-16T11:29:24.386658Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.387788Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.387859Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.392411Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.392609Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi9kh9f kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.448426Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.449195Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind= uid=58813558-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:29:24.553001Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:29:24.553322Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmivd65z kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
Pod name: virt-handler-g269k Pod phase: Running level=info timestamp=2018-07-16T11:32:41.121314Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:32:41.121489Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-16T11:32:41.243907Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-16T11:32:41.279379Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:32:41.296430Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="No update processing required" level=info timestamp=2018-07-16T11:32:41.463498Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:32:41.530680Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:32:41.628371Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-16T11:32:42.068066Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=error timestamp=2018-07-16T11:32:42.177338Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmittxxb\": the object has been modified; please apply your changes to the latest version and try again" msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-16T11:32:42.889502Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmittxxb\": the object has been modified; please apply your changes to the latest version and try again" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmittxxb" level=info timestamp=2018-07-16T11:32:42.889679Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-16T11:32:43.271462Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." 
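The error/"re-enqueuing" pairs in the virt-handler log above are routine optimistic-concurrency conflicts: the status update was made against a stale resourceVersion, so the handler requeues the VirtualMachineInstance and the next synchronization loop succeeds against the latest object. Outside a controller work queue the same pattern is commonly written with client-go's conflict-retry helper; this is only an illustrative sketch with a placeholder update function, not KubeVirt's code:

```go
// Illustrative only: resolving an "object has been modified" conflict by
// re-reading the object and retrying, the same idea behind the
// "re-enqueuing VirtualMachineInstance" messages above.
package main

import (
	"fmt"

	"k8s.io/client-go/util/retry"
)

// updateStatusLatest stands in for the read-modify-write cycle that a real
// controller performs with the generated KubeVirt client.
func updateStatusLatest() error { return nil }

func main() {
	attempts := 0
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		attempts++
		// Placeholder: GET the latest VirtualMachineInstance, mutate its
		// status, UPDATE it. A Conflict error returned here makes the
		// helper retry with fresh data, just as the handler re-enqueues.
		return updateStatusLatest()
	})
	if err != nil {
		fmt.Println("still conflicting after retries:", err)
		return
	}
	fmt.Println("status updated after", attempts, "attempt(s)")
}
```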
level=info timestamp=2018-07-16T11:32:43.271663Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-16T11:32:44.514382Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmi9cn2k-4w22z Pod phase: Running level=info timestamp=2018-07-16T11:32:27.323385Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-16T11:32:28.088316Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-16T11:32:28.117543Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID d0d602ef-bb14-4b8d-b7e3-bbed1c16b68e" level=info timestamp=2018-07-16T11:32:28.125676Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-16T11:32:28.552635Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:29.157929Z pos=monitor.go:222 component=virt-launcher msg="Found PID for d0d602ef-bb14-4b8d-b7e3-bbed1c16b68e: 167" level=info timestamp=2018-07-16T11:32:29.269036Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-16T11:32:29.305392Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Domain started." level=info timestamp=2018-07-16T11:32:29.317386Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:29.317584Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:29.416512Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:29.416652Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-16T11:32:29.465298Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:29.738333Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:38.681996Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi9cn2k kind= uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmigwq5s-wl6rq Pod phase: Running level=info timestamp=2018-07-16T11:32:28.853277Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-16T11:32:28.958472Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID b29cac00-eefb-4918-ae0e-5ed003fe0554" level=info timestamp=2018-07-16T11:32:28.958945Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-16T11:32:29.153770Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:29.872579Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-16T11:32:29.908424Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmigwq5s 
kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Domain started." level=info timestamp=2018-07-16T11:32:29.922014Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:29.922943Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:29.976287Z pos=monitor.go:222 component=virt-launcher msg="Found PID for b29cac00-eefb-4918-ae0e-5ed003fe0554: 172" level=info timestamp=2018-07-16T11:32:30.251516Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:30.251744Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-16T11:32:30.283625Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:34.144978Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:38.746966Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:41.125845Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmigwq5s kind= uid=dbee8483-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmiksqn7-q9zkn Pod phase: Running level=info timestamp=2018-07-16T11:32:27.746414Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-16T11:32:28.838882Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-16T11:32:28.857677Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 1468f29e-25bd-4aa1-8234-be6ed406789e" level=info timestamp=2018-07-16T11:32:28.857991Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-16T11:32:28.909547Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:29.801522Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-16T11:32:29.872389Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 1468f29e-25bd-4aa1-8234-be6ed406789e: 175" level=info timestamp=2018-07-16T11:32:29.903285Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-16T11:32:29.910182Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-16T11:32:29.920402Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:31.909494Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:31.909724Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-16T11:32:31.979961Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-16T11:32:34.304921Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-16T11:32:41.673957Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiksqn7 kind= uid=dbe69b78-88eb-11e8-bd4c-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmittxxb-tlhsx Pod phase: Running level=info timestamp=2018-07-16T11:32:34.211599Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-16T11:32:34.225560Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 883cc4fd-7f2a-437b-93a3-c2972ff4e7b0" level=info timestamp=2018-07-16T11:32:34.251386Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-16T11:32:35.262093Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 883cc4fd-7f2a-437b-93a3-c2972ff4e7b0: 181" level=info timestamp=2018-07-16T11:32:35.430150Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-16T11:32:35.434577Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-16T11:32:37.215795Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-16T11:32:37.216567Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-16T11:32:37.256742Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-16T11:32:41.036406Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-16T11:32:41.036890Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-16T11:32:41.111476Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-16T11:32:41.245012Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-16T11:32:42.897120Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-16T11:32:43.282641Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmittxxb kind= uid=dbf30830-88eb-11e8-bd4c-525500d15501 msg="Synced vmi"
------------------------------
• Failure [187.751 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be able to reach
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    the Inbound VirtualMachineInstance with custom MAC address [It]
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

    Expected error:
      : 180000000000
      expect: timer expired after 180 seconds
    not to have occurred
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:224
------------------------------
STEP: checking br1 MTU inside the pod
level=info timestamp=2018-07-16T11:35:33.036201Z pos=vmi_networking_test.go:185 component=tests msg="4: br1: mtu 1450 qdisc noqueue state UP group default \n link/ether 0a:58:0a:b1:e7:98 brd ff:ff:ff:ff:ff:ff\n inet 169.254.75.86/32 brd 169.254.75.86 scope global br1\n valid_lft forever preferred_lft forever\n inet6 fe80::858:aff:feb1:e798/64 scope link \n valid_lft forever preferred_lft forever\n"
STEP: checking eth0 MTU inside the VirtualMachineInstance
level=info timestamp=2018-07-16T11:35:33.804683Z pos=vmi_networking_test.go:205 component=tests msg="[{1 \r\n$ [$ ]} {3 ip address show eth0\r\n2: eth0: mtu 1450 qdisc pfifo_fast qlen 1000\r\n [2: eth0: mtu 1450 qdisc pfifo_fast qlen 1000\r\n]} {5 link/ether 0a:58:0a:81 [0]}]"
STEP: checking the VirtualMachineInstance can send MTU sized frames to another VirtualMachineInstance
level=info timestamp=2018-07-16T11:38:39.412675Z pos=utils.go:1190 component=tests namespace=kubevirt-test-default name=testvmi9cn2k kind=VirtualMachineInstance uid=dbe97286-88eb-11e8-bd4c-525500d15501 msg="[{1 \r\n\r\n$ [$ ]} {3 ping 10.129.0.14 -c 1 -w 5 -s 1422\r\nPING 10.129.0.14 (10.129.0.14): 1422 data bytes\r\n\r\n--- 10.129.0.14 ping statistics ---\r\n5 packets transmitted, 0 packets received, 100% packet loss\r\n$ [$ ]} {5 echo $?\r\n1\r\n$ []}]"
•
------------------------------
• [SLOW TEST:5.238 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be
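In the MTU step above the guest pings with -s 1422 because the ICMP payload must fit within the reported MTU of 1450 once the 20-byte IPv4 header and the 8-byte ICMP header are subtracted. A small sketch of that arithmetic (not taken from the test code):

```go
// Payload size for an MTU-sized ICMP echo, matching the "-s 1422" seen in
// the test output above for an interface MTU of 1450.
package main

import "fmt"

const (
	ipv4HeaderBytes = 20
	icmpHeaderBytes = 8
)

func icmpPayloadForMTU(mtu int) int {
	return mtu - ipv4HeaderBytes - icmpHeaderBytes
}

func main() {
	mtu := 1450 // br1/eth0 MTU reported in the log
	// Prints: ping -c 1 -w 5 -s 1422 <target>
	fmt.Printf("ping -c 1 -w 5 -s %d <target>\n", icmpPayloadForMTU(mtu))
}
```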
reachable via the propagated IP from a Pod /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 on the same node from Pod /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ ••• ------------------------------ • [SLOW TEST:5.842 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48 with a service matching the vmi exposed /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:283 should be able to reach the vmi based on labels specified on the vmi /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:303 ------------------------------ • ------------------------------ • [SLOW TEST:5.409 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48 with a subdomain and a headless service given /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:330 should be able to reach the vmi via its unique fully qualified domain name /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:353 ------------------------------ • [SLOW TEST:33.516 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48 VirtualMachineInstance with custom interface model /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:379 should expose the right device type to the guest /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:380 ------------------------------ • ------------------------------ • [SLOW TEST:31.445 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48 VirtualMachineInstance with custom MAC address /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:413 should configure custom MAC address /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:414 ------------------------------ • [SLOW TEST:33.262 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48 VirtualMachineInstance with custom MAC address in non-conventional format /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:425 should configure custom MAC address /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:426 ------------------------------ • [SLOW TEST:34.494 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48 VirtualMachineInstance with custom MAC address and slirp interface /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:438 should configure custom MAC address /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:439 ------------------------------ • [SLOW TEST:52.414 seconds] Console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64 with a serial console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65 with a cirros image /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66 should return that we are running cirros /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:67 ------------------------------ • [SLOW TEST:49.650 seconds] Console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64 with a serial console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65 with a fedora image /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:76 should return that we are running fedora 
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:77 ------------------------------ • [SLOW TEST:37.092 seconds] Console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64 with a serial console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65 should be able to reconnect to console multiple times /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:86 ------------------------------ Service cluster-ip-vm successfully exposed for virtualmachineinstance testvmibpn2z • [SLOW TEST:54.445 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:68 Should expose a Cluster IP service on a VM and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:71 ------------------------------ Service node-port-vm successfully exposed for virtualmachineinstance testvmibpn2z • [SLOW TEST:10.679 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61 Expose NodePort service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:98 Should expose a NodePort service on a VM and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:103 ------------------------------ Service cluster-ip-udp-vm successfully exposed for virtualmachineinstance testvmi45n89 • [SLOW TEST:51.783 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose UDP service on a VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:140 Expose ClusterIP UDP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:147 Should expose a ClusterIP service on a VM and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:151 ------------------------------ Service node-port-udp-vm successfully exposed for virtualmachineinstance testvmi45n89 • [SLOW TEST:10.762 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose UDP service on a VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:140 Expose NodePort UDP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:179 Should expose a NodePort service on a VM and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:184 ------------------------------ Service cluster-ip-vmrs successfully exposed for vmirs replicasettvb9v • [SLOW TEST:64.783 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VM replica set /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:227 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:260 Should create a ClusterIP service on VMRS and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:264 ------------------------------ Service cluster-ip-ovm successfully exposed for virtualmachine testvmiq5n66 • [SLOW TEST:52.478 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on an Offline VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:292 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:336 Connect to ClusterIP services that was set when VM was offline /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:337 ------------------------------ • [SLOW TEST:37.415 seconds] Storage 
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:33.348 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:115.769 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:109.596 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:49.998 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With an emptyDisk defined /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:113 should create a writeable emptyDisk with the right capacity /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:115 ------------------------------ • [SLOW TEST:52.481 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With an emptyDisk defined and a specified serial number /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:163 should create a writeable emptyDisk with the specified serial number /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:165 ------------------------------ • [SLOW TEST:33.313 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should be successfully started /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:207 ------------------------------ • [SLOW TEST:76.021 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a 
VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should not persist data /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:218 ------------------------------ • [SLOW TEST:98.983 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With VirtualMachineInstance with two PVCs /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:266 should start vmi multiple times /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:278 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.021 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to start a vmi [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:133 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.013 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to stop a running vmi [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:139 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.011 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150 should have correct UUID /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:192 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.017 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150 should have pod IP /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:208 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.019 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226 should succeed to start a vmi /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:242 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.046 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226 should succeed to stop a vmi /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:250 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342 ------------------------------ •• ------------------------------ • [SLOW TEST:19.529 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should update 
VirtualMachine once VMIs are up /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:195 ------------------------------ •• ------------------------------ • [SLOW TEST:26.785 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if it gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:245 ------------------------------ • [SLOW TEST:57.256 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:265 ------------------------------ • [SLOW TEST:26.902 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should stop VirtualMachineInstance if running set to false /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:325 ------------------------------ • [SLOW TEST:135.642 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should start and stop VirtualMachineInstance multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:333 ------------------------------ • [SLOW TEST:46.285 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should not update the VirtualMachineInstance spec if Running /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:346 ------------------------------ • [SLOW TEST:193.496 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should survive guest shutdown, multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:387 ------------------------------ • [SLOW TEST:18.883 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should start a VirtualMachineInstance once /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:436 ------------------------------ • [SLOW TEST:30.855 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should stop a VirtualMachineInstance once /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:467 ------------------------------ •••••••••••• Pod name: disks-images-provider-5hjmw Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-mdmdp Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-d7nrn Pod phase: Running 2018/07/16 12:07:00 http: TLS handshake error from 10.129.0.1:45658: EOF level=info timestamp=2018-07-16T12:07:02.337733Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-16T12:07:04.340555Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- 
method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 12:07:10 http: TLS handshake error from 10.129.0.1:45664: EOF level=info timestamp=2018-07-16T12:07:10.614595Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T12:07:14.541351Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T12:07:15.642351Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T12:07:15.680723Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 12:07:20 http: TLS handshake error from 10.129.0.1:45670: EOF level=info timestamp=2018-07-16T12:07:24.849205Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-16T12:07:25.027284Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/version proto=HTTP/2.0 statusCode=200 contentLength=246 2018/07/16 12:07:30 http: TLS handshake error from 10.129.0.1:45682: EOF level=info timestamp=2018-07-16T12:07:35.092151Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/16 12:07:40 http: TLS handshake error from 10.129.0.1:45688: EOF level=info timestamp=2018-07-16T12:07:40.707854Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-kgq4g Pod phase: Running 2018/07/16 12:05:41 http: TLS handshake error from 10.128.0.1:53650: EOF 2018/07/16 12:05:51 http: TLS handshake error from 10.128.0.1:53706: EOF 2018/07/16 12:06:01 http: TLS handshake error from 10.128.0.1:53756: EOF 2018/07/16 12:06:11 http: TLS handshake error from 10.128.0.1:53810: EOF 2018/07/16 12:06:21 http: TLS handshake error from 10.128.0.1:53860: EOF 2018/07/16 12:06:31 http: TLS handshake error from 10.128.0.1:54006: EOF 2018/07/16 12:06:41 http: TLS handshake error from 10.128.0.1:54060: EOF 2018/07/16 12:06:51 http: TLS handshake error from 10.128.0.1:54110: EOF 2018/07/16 12:07:01 http: TLS handshake error from 10.128.0.1:54160: EOF 2018/07/16 12:07:11 http: TLS handshake error from 10.128.0.1:54214: EOF level=info timestamp=2018-07-16T12:07:20.385727Z pos=validating-webhook.go:84 component=virt-api msg="rejected vmi admission" 2018/07/16 12:07:21 http: TLS handshake error from 10.128.0.1:54264: EOF 2018/07/16 12:07:31 http: TLS handshake error from 10.128.0.1:54314: EOF level=info timestamp=2018-07-16T12:07:33.120910Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 
2018/07/16 12:07:41 http: TLS handshake error from 10.128.0.1:54368: EOF Pod name: virt-controller-7d57d96b65-p6vnn Pod phase: Running level=info timestamp=2018-07-16T11:31:09.354793Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-v5pw7 Pod phase: Running level=info timestamp=2018-07-16T12:07:22.583774Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihb9z2 kind= uid=cbaf627f-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T12:07:23.150848Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihb9z2\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihb9z2" level=info timestamp=2018-07-16T12:07:23.557377Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihb9z2\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmihb9z2, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: cbaf627f-88f0-11e8-bd4c-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihb9z2" level=info timestamp=2018-07-16T12:07:23.887321Z pos=preset.go:161 component=virt-controller service=http namespace=kubevirt-test-default name=testvmispztc kind= uid=cc78bd98-88f0-11e8-bd4c-525500d15501 msg="VirtualMachineInstance is excluded from VirtualMachinePresets" level=info timestamp=2018-07-16T12:07:23.890197Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmispztc kind= uid=cc78bd98-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T12:07:24.166157Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmispztc\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmispztc" level=info timestamp=2018-07-16T12:07:24.355351Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmispztc\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmispztc, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: cc78bd98-88f0-11e8-bd4c-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmispztc" level=info timestamp=2018-07-16T12:07:25.109521Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T12:07:25.109847Z pos=preset.go:202 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="VirtualMachineInstancePreset test-conflict-wnq9w matches VirtualMachineInstance" level=info timestamp=2018-07-16T12:07:25.109949Z 
pos=preset.go:202 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="VirtualMachineInstancePreset test-memory-b9dnn matches VirtualMachineInstance" level=error timestamp=2018-07-16T12:07:25.111021Z pos=preset.go:362 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="VirtualMachinePresets cannot be applied due to conflicts: presets 'test-memory-b9dnn' and 'test-conflict-wnq9w' conflict: spec.resources.requests[memory]: {{128 6} {} 128M DecimalSI} != {{256 6} {} 256M DecimalSI}" level=warning timestamp=2018-07-16T12:07:25.115140Z pos=preset.go:154 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as failed" level=info timestamp=2018-07-16T12:07:25.115274Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-16T12:07:25.817993Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-16T12:07:25.818136Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-handler-8s468 Pod phase: Running level=info timestamp=2018-07-16T11:41:29.915972Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-16T11:41:29.920076Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-16T11:41:29.933718Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmisxztq" level=info timestamp=2018-07-16T11:41:30.172704Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:41:30.181998Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-16T11:41:30.185986Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-16T11:41:30.188414Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-16T11:41:30.188827Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Processing shutdown." 
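The "VirtualMachinePresets cannot be applied due to conflicts" error above is the controller refusing to merge two presets that both match the VMI but request different memory (128M vs 256M); rather than pick a winner it marks the VirtualMachineInstance as failed. A toy version of that conflict check, using plain maps rather than KubeVirt's preset types:

```go
// Toy conflict check between two matching presets: if both set the same
// resource request with different values, applying them is refused, which
// is what the "presets ... conflict: spec.resources.requests[memory]"
// message above reports.
package main

import "fmt"

func findConflicts(a, b map[string]string) []string {
	var conflicts []string
	for key, av := range a {
		if bv, ok := b[key]; ok && bv != av {
			conflicts = append(conflicts, fmt.Sprintf("requests[%s]: %s != %s", key, av, bv))
		}
	}
	return conflicts
}

func main() {
	testConflict := map[string]string{"memory": "128M"} // e.g. preset test-conflict-wnq9w
	testMemory := map[string]string{"memory": "256M"}   // e.g. preset test-memory-b9dnn
	if c := findConflicts(testConflict, testMemory); len(c) > 0 {
		fmt.Println("presets cannot be applied due to conflicts:", c)
		// The controller then marks the VirtualMachineInstance as failed
		// instead of guessing which preset should win.
	}
}
```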
level=info timestamp=2018-07-16T11:41:30.192406Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=error timestamp=2018-07-16T11:41:30.205724Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisxztq\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmisxztq, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 05af3a12-88ed-11e8-bd4c-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-16T11:41:30.205898Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisxztq\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmisxztq, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 05af3a12-88ed-11e8-bd4c-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmisxztq" level=info timestamp=2018-07-16T11:41:30.206555Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:41:30.206666Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T11:41:30.211687Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-16T11:41:30.211969Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-g269k Pod phase: Running level=info timestamp=2018-07-16T12:07:07.392810Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-16T12:07:07.393576Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-16T12:07:07.393654Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind= uid=b7eb7afa-88f0-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-16T12:07:07.395770Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind= uid=b7eb7afa-88f0-11e8-bd4c-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-16T12:07:07.396448Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind= uid=b7eb7afa-88f0-11e8-bd4c-525500d15501 msg="Processing shutdown." 
level=info timestamp=2018-07-16T12:07:07.400869Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED"
level=info timestamp=2018-07-16T12:07:07.449664Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind= uid=b7eb7afa-88f0-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-16T12:07:07.454808Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind= uid=b7eb7afa-88f0-11e8-bd4c-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-16T12:07:07.455398Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind= uid=b7eb7afa-88f0-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-16T12:07:19.031986Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-16T12:07:19.032586Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-07-16T12:07:40.868290Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Processing vmi update"
level=error timestamp=2018-07-16T12:07:40.900951Z pos=vm.go:397 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 reason="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="Synchronizing the VirtualMachineInstance failed."
level=info timestamp=2018-07-16T12:07:41.005602Z pos=vm.go:251 component=virt-handler reason="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmij48sj"
level=info timestamp=2018-07-16T12:07:41.008110Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Processing vmi update"
Pod name: virt-launcher-testvmij48sj-vdh7p
Pod phase: Running
level=info timestamp=2018-07-16T12:07:30.096030Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]"
level=info timestamp=2018-07-16T12:07:30.099639Z pos=libvirt.go:256 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system"
level=info timestamp=2018-07-16T12:07:40.113296Z pos=libvirt.go:271 component=virt-launcher msg="Connected to libvirt daemon"
level=info timestamp=2018-07-16T12:07:40.185009Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmij48sj"
level=info timestamp=2018-07-16T12:07:40.187607Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback"
level=info timestamp=2018-07-16T12:07:40.188094Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready"
level=error timestamp=2018-07-16T12:07:40.890216Z pos=manager.go:159 component=virt-launcher namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 reason="virError(Code=0, Domain=0, Message='Missing error')" msg="Getting the domain failed."
level=error timestamp=2018-07-16T12:07:40.890641Z pos=server.go:68 component=virt-launcher namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 reason="virError(Code=0, Domain=0, Message='Missing error')" msg="Failed to sync vmi"
level=info timestamp=2018-07-16T12:07:41.046321Z pos=cloud-init.go:254 component=virt-launcher msg="generated nocloud iso file /var/run/libvirt/kubevirt-ephemeral-disk/cloud-init-data/kubevirt-test-default/testvmij48sj/noCloud.iso"
level=error timestamp=2018-07-16T12:07:41.092746Z pos=common.go:126 component=virt-launcher msg="updated MAC for interface: eth0 - 0a:58:0a:6f:a6:21"
level=info timestamp=2018-07-16T12:07:41.102484Z pos=converter.go:729 component=virt-launcher msg="Found nameservers in /etc/resolv.conf: \ufffd\ufffdBf"
level=info timestamp=2018-07-16T12:07:41.102630Z pos=converter.go:730 component=virt-launcher msg="Found search domains in /etc/resolv.conf: kubevirt-test-default.svc.cluster.local svc.cluster.local cluster.local"
level=info timestamp=2018-07-16T12:07:41.103456Z pos=dhcp.go:62 component=virt-launcher msg="Starting SingleClientDHCPServer"
level=info timestamp=2018-07-16T12:07:41.162763Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-16T12:07:41.176874Z pos=manager.go:157 component=virt-launcher namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Domain defined."
Pod name: disks-images-provider-5hjmw
Pod phase: Running
copy all images to host mount directory
Pod name: disks-images-provider-mdmdp
Pod phase: Running
copy all images to host mount directory
Pod name: virt-api-7d79764579-d7nrn
Pod phase: Running
level=info timestamp=2018-07-16T12:10:12.036945Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-16T12:10:17.477921Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-16T12:10:17.525599Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-16T12:10:19.045539Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/16 12:10:20 http: TLS handshake error from 10.129.0.1:45786: EOF
level=info timestamp=2018-07-16T12:10:29.272071Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/16 12:10:30 http: TLS handshake error from 10.129.0.1:45792: EOF
level=info timestamp=2018-07-16T12:10:32.535622Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
level=info timestamp=2018-07-16T12:10:39.480705Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/16 12:10:40 http: TLS handshake error from 10.129.0.1:45798: EOF
level=info timestamp=2018-07-16T12:10:42.301831Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-16T12:10:47.807699Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-16T12:10:47.824860Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-16T12:10:49.700693Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/16 12:10:50 http: TLS handshake error from 10.129.0.1:45804: EOF
Pod name: virt-api-7d79764579-kgq4g
Pod phase: Running
level=info timestamp=2018-07-16T12:08:56.072463Z pos=subresource.go:78 component=virt-api msg="Websocket connection upgraded"
2018/07/16 12:09:01 http: TLS handshake error from 10.128.0.1:54786: EOF
2018/07/16 12:09:11 http: TLS handshake error from 10.128.0.1:54840: EOF
2018/07/16 12:09:21 http: TLS handshake error from 10.128.0.1:54890: EOF
2018/07/16 12:09:31 http: TLS handshake error from 10.128.0.1:54940: EOF
level=info timestamp=2018-07-16T12:09:33.231985Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/07/16 12:09:41 http: TLS handshake error from 10.128.0.1:54994: EOF
2018/07/16 12:09:51 http: TLS handshake error from 10.128.0.1:55044: EOF
2018/07/16 12:10:01 http: TLS handshake error from 10.128.0.1:55094: EOF
level=info timestamp=2018-07-16T12:10:02.979441Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/07/16 12:10:11 http: TLS handshake error from 10.128.0.1:55148: EOF
2018/07/16 12:10:21 http: TLS handshake error from 10.128.0.1:55198: EOF
2018/07/16 12:10:31 http: TLS handshake error from 10.128.0.1:55248: EOF
2018/07/16 12:10:41 http: TLS handshake error from 10.128.0.1:55302: EOF
2018/07/16 12:10:51 http: TLS handshake error from 10.128.0.1:55352: EOF
Pod name: virt-controller-7d57d96b65-p6vnn
Pod phase: Running
level=info timestamp=2018-07-16T11:31:09.354793Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182
Pod name: virt-controller-7d57d96b65-v5pw7
Pod phase: Running
level=info timestamp=2018-07-16T12:07:22.583774Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihb9z2 kind= uid=cbaf627f-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-07-16T12:07:23.150848Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihb9z2\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihb9z2"
level=info timestamp=2018-07-16T12:07:23.557377Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihb9z2\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmihb9z2, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: cbaf627f-88f0-11e8-bd4c-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihb9z2"
level=info timestamp=2018-07-16T12:07:23.887321Z pos=preset.go:161 component=virt-controller service=http namespace=kubevirt-test-default name=testvmispztc kind= uid=cc78bd98-88f0-11e8-bd4c-525500d15501 msg="VirtualMachineInstance is excluded from VirtualMachinePresets"
level=info timestamp=2018-07-16T12:07:23.890197Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmispztc kind= uid=cc78bd98-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-07-16T12:07:24.166157Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmispztc\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmispztc"
level=info timestamp=2018-07-16T12:07:24.355351Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmispztc\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmispztc, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: cc78bd98-88f0-11e8-bd4c-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmispztc"
level=info timestamp=2018-07-16T12:07:25.109521Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-07-16T12:07:25.109847Z pos=preset.go:202 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="VirtualMachineInstancePreset test-conflict-wnq9w matches VirtualMachineInstance"
level=info timestamp=2018-07-16T12:07:25.109949Z pos=preset.go:202 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="VirtualMachineInstancePreset test-memory-b9dnn matches VirtualMachineInstance"
level=error timestamp=2018-07-16T12:07:25.111021Z pos=preset.go:362 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="VirtualMachinePresets cannot be applied due to conflicts: presets 'test-memory-b9dnn' and 'test-conflict-wnq9w' conflict: spec.resources.requests[memory]: {{128 6} {} 128M DecimalSI} != {{256 6} {} 256M DecimalSI}"
level=warning timestamp=2018-07-16T12:07:25.115140Z pos=preset.go:154 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as failed"
level=info timestamp=2018-07-16T12:07:25.115274Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizdwqs kind= uid=cd30292d-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-07-16T12:07:25.817993Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-07-16T12:07:25.818136Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Marking VirtualMachineInstance as initialized"
Pod name: virt-handler-8s468
Pod phase: Running
level=info timestamp=2018-07-16T11:41:29.915972Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp."
level=info timestamp=2018-07-16T11:41:29.920076Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Processing shutdown."
level=info timestamp=2018-07-16T11:41:29.933718Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmisxztq"
level=info timestamp=2018-07-16T11:41:30.172704Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-16T11:41:30.181998Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED"
level=info timestamp=2018-07-16T11:41:30.185986Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=Domain uid= msg="Domain deleted"
level=info timestamp=2018-07-16T11:41:30.188414Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp."
level=info timestamp=2018-07-16T11:41:30.188827Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 msg="Processing shutdown."
level=info timestamp=2018-07-16T11:41:30.192406Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED"
level=error timestamp=2018-07-16T11:41:30.205724Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind= uid=05af3a12-88ed-11e8-bd4c-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisxztq\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmisxztq, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 05af3a12-88ed-11e8-bd4c-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed."
level=info timestamp=2018-07-16T11:41:30.205898Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmisxztq\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmisxztq, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 05af3a12-88ed-11e8-bd4c-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmisxztq"
level=info timestamp=2018-07-16T11:41:30.206555Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-16T11:41:30.206666Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-07-16T11:41:30.211687Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-16T11:41:30.211969Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmisxztq kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
Pod name: virt-handler-g269k
Pod phase: Running
level=info timestamp=2018-07-16T12:07:19.032586Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmimjq58 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-07-16T12:07:40.868290Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Processing vmi update"
level=error timestamp=2018-07-16T12:07:40.900951Z pos=vm.go:397 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 reason="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="Synchronizing the VirtualMachineInstance failed."
level=info timestamp=2018-07-16T12:07:41.005602Z pos=vm.go:251 component=virt-handler reason="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmij48sj"
level=info timestamp=2018-07-16T12:07:41.008110Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Processing vmi update"
level=info timestamp=2018-07-16T12:07:42.017889Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type ADDED"
level=info timestamp=2018-07-16T12:07:42.019096Z pos=vm.go:657 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind=Domain uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Domain is in state Paused reason Unknown"
level=info timestamp=2018-07-16T12:07:42.712065Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED"
level=info timestamp=2018-07-16T12:07:42.712327Z pos=vm.go:688 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind=Domain uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Domain is in state Running reason Unknown"
level=info timestamp=2018-07-16T12:07:42.730993Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED"
level=info timestamp=2018-07-16T12:07:42.771782Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-16T12:07:42.772293Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="No update processing required"
level=info timestamp=2018-07-16T12:07:42.806957Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-16T12:07:42.811595Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Processing vmi update"
level=info timestamp=2018-07-16T12:07:42.825240Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Synchronization loop succeeded."
Pod name: virt-launcher-testvmij48sj-vdh7p
Pod phase: Running
level=info timestamp=2018-07-16T12:07:41.176874Z pos=manager.go:157 component=virt-launcher namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Domain defined."
level=info timestamp=2018-07-16T12:07:41.984424Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-16T12:07:41.997953Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 27554f19-11a4-462d-bece-2bd5a660fad8"
level=info timestamp=2018-07-16T12:07:42.002729Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-16T12:07:42.019799Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-16T12:07:42.678549Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-16T12:07:42.704946Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Domain started."
level=info timestamp=2018-07-16T12:07:42.708248Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-16T12:07:42.708455Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-16T12:07:42.713601Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-16T12:07:42.713752Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-16T12:07:42.729837Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-16T12:07:42.735300Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-16T12:07:42.824265Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmij48sj kind= uid=cd9f1764-88f0-11e8-bd4c-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-16T12:07:43.007726Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 27554f19-11a4-462d-bece-2bd5a660fad8: 143"
------------------------------
• Failure [210.629 seconds]
CloudInit UserData
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80
with cloudInitNoCloud userDataBase64 source
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81
should have cloud-init data [It]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:82

Unexpected Warning event received.
Expected : Warning not to equal : Warning
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:245
------------------------------
STEP: Starting a VirtualMachineInstance
STEP: Waiting the VirtualMachineInstance start
level=info timestamp=2018-07-16T12:07:25.678478Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmij48sj-vdh7p"
level=info timestamp=2018-07-16T12:07:41.162013Z pos=utils.go:243 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmij48sj-vdh7p"
level=error timestamp=2018-07-16T12:07:41.308822Z pos=utils.go:241 component=tests reason="unexpected warning event received" msg="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')"
STEP: Expecting the VirtualMachineInstance console
STEP: Checking that the VirtualMachineInstance serial console output equals to expected one
level=info timestamp=2018-07-16T12:10:55.553491Z pos=vmi_userdata_test.go:72 component=tests namespace=kubevirt-test-default name=testvmij48sj kind=VirtualMachineInstance uid= msg="[{0 []}]"
• [SLOW TEST:105.474 seconds]
CloudInit UserData
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80
with cloudInitNoCloud userDataBase64 source
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81
with injected ssh-key
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:92
should have ssh-key under authorized keys
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:93
------------------------------
• [SLOW TEST:56.131 seconds]
CloudInit UserData
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80
with cloudInitNoCloud userData source
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:118
should process provided cloud-init data
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:119
------------------------------
• [SLOW TEST:46.621 seconds]
CloudInit UserData
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80
should take user-data from k8s secret
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:162
------------------------------
• [SLOW TEST:20.913 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
With default kubevirt service accounts
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
should verify permissions are correct for view, edit, and admin
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
given a vmi
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:20.333 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
With default kubevirt service accounts
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
should verify permissions are correct for view, edit, and admin
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
given an vm
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:19.997 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
With default kubevirt service accounts
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
should verify permissions are correct for view, edit, and admin
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
given a vmi preset
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:21.297 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
With default kubevirt service accounts
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
should verify permissions are correct for view, edit, and admin
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
given a vmi replica set
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:19.403 seconds]
VNC
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:46
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:54
with VNC connection
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:62
should allow accessing the VNC device
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:64
------------------------------
••
------------------------------
• [SLOW TEST:97.254 seconds]
Slirp
/root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:39
should be able to
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
VirtualMachineInstance with slirp interface
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
•
------------------------------
• [SLOW TEST:44.665 seconds]
Health Monitoring
/root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:37
A VirtualMachineInstance with a watchdog device
/root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:56
should be shut down when the watchdog expires
/root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:57
------------------------------
• [SLOW TEST:66.988 seconds]
RegistryDisk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41
Starting and stopping the same VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:90
with ephemeral registry disk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:91
should success multiple times
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:92
------------------------------
• [SLOW TEST:17.607 seconds]
RegistryDisk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41
Starting a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:111
with ephemeral registry disk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:112
should not modify the spec on status update
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:113
------------------------------
• [SLOW TEST:39.375 seconds]
RegistryDisk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41
Starting multiple VMIs
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:129
with ephemeral registry disk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:130
should success
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:131
------------------------------
•
------------------------------
• [SLOW TEST:8.073 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
should scale
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
to five, to six and then to zero replicas
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
••
------------------------------
• [SLOW TEST:24.992 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
should update readyReplicas once VMIs are up
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:157
------------------------------
••
------------------------------
• [SLOW TEST:5.567 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
should not scale when paused and scale when resume
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:223
------------------------------
••
------------------------------
• [SLOW TEST:19.436 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
VMI definition
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
with SM BIOS hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
should successfully start with hook sidecar annotation
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:60
------------------------------
• [SLOW TEST:20.998 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
VMI definition
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
with SM BIOS hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
should call Collect and OnDefineDomain on the hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:67
------------------------------
• [SLOW TEST:22.940 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
VMI definition
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
with SM BIOS hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
should update domain XML with SM BIOS properties
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:83
------------------------------
•
------------------------------
• [SLOW TEST:5.004 seconds]
Subresource Api
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37
Rbac Authorization
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:48
Without permissions
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:56
should not be able to access subresource endpoint
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:57
------------------------------
•
------------------------------
• [SLOW TEST:5.467 seconds]
Subresource Api
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37
Rbac Authorization For Version Command
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:63
Without permissions
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:71
should be able to access subresource version endpoint
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:72
------------------------------
•
------------------------------
• [SLOW TEST:18.572 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
should start it
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:76
------------------------------
• [SLOW TEST:19.218 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
should attach virt-launcher to it
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:82
------------------------------
••••
------------------------------
• [SLOW TEST:35.668 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with boot order
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170
should be able to boot from selected disk
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
Alpine as first boot
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:26.223 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with boot order
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170
should be able to boot from selected disk
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
Cirros as first boot
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:15.940 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with user-data
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201
without k8s secret
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202
should retry starting the VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:203
------------------------------
• [SLOW TEST:18.541 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with user-data
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201
without k8s secret
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202
should log warning and proceed once the secret is there
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:233
------------------------------
• [SLOW TEST:48.217 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
when virt-launcher crashes
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:281
should be stopped and have Failed phase
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:282
------------------------------
• [SLOW TEST:32.877 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
when virt-handler crashes
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:304
should recover and continue management
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:305
------------------------------
• [SLOW TEST:63.336 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
when virt-handler is responsive
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:335
should indicate that a node is ready for vmis
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:336
------------------------------
• [SLOW TEST:83.508 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
when virt-handler is not responsive
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:366
the node controller should react
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:405
------------------------------
S [SKIPPING] [0.162 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with non default namespace
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:458
should log libvirt start and stop lifecycle events of the domain
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
kubevirt-test-default [It]
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

Skip log query tests for JENKINS ci test environment
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:463
------------------------------
S [SKIPPING] [0.423 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with non default namespace
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:458
should log libvirt start and stop lifecycle events of the domain
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
kubevirt-test-alternative [It]
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

Skip log query tests for JENKINS ci test environment
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:463
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.122 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
VirtualMachineInstance Emulation Mode
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:519
should enable emulation in virt-launcher [BeforeEach]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:531

Software emulation is not enabled on this cluster
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:527
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.115 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
VirtualMachineInstance Emulation Mode
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:519
should be reflected in domain XML [BeforeEach]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:568

Software emulation is not enabled on this cluster
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:527
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.132 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
VirtualMachineInstance Emulation Mode
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:519
should request a TUN device but not KVM [BeforeEach]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:612

Software emulation is not enabled on this cluster
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:527
------------------------------
••••
------------------------------
• [SLOW TEST:21.565 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Delete a VirtualMachineInstance's Pod
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:764
should result in the VirtualMachineInstance moving to a finalized state
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:765
------------------------------
• [SLOW TEST:22.682 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Delete a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:796
with an active pod.
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:797
should result in pod being terminated
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:798
------------------------------
• [SLOW TEST:23.507 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Delete a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:796
with grace period greater than 0
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:821
should run graceful shutdown
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:822
------------------------------
• [SLOW TEST:33.963 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Killed VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:873
should be in Failed phase
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:874
------------------------------
• [SLOW TEST:27.145 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Killed VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:873
should be left alone by virt-handler
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:901
------------------------------
Waiting for namespace kubevirt-test-default to be removed, this can take a while ...
Waiting for namespace kubevirt-test-alternative to be removed, this can take a while ...

Summarizing 4 Failures:

[Fail] Configurations New VirtualMachineInstance with all supported drives [It] should have all the device nodes
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:245

[Fail] Networking [BeforeEach] should be able to reach the Inbound VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:245

[Fail] Networking should be able to reach [It] the Inbound VirtualMachineInstance with custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:224

[Fail] CloudInit UserData A new VirtualMachineInstance with cloudInitNoCloud userDataBase64 source [It] should have cloud-init data
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:245

Ran 127 of 140 Specs in 3940.672 seconds
FAIL! -- 123 Passed | 4 Failed | 0 Pending | 13 Skipped
--- FAIL: TestTests (3940.68s)
FAIL
make: *** [functest] Error 1
+ make cluster-down
./cluster/down.sh