+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ [[ openshift-3.10-release =~ openshift-.* ]]
+ [[ openshift-3.10-release =~ .*-crio-.* ]]
+ export KUBEVIRT_PROVIDER=os-3.10.0
+ KUBEVIRT_PROVIDER=os-3.10.0
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT SIGINT SIGTERM SIGSTOP
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading ....................................................................
Downloading .......
2018/08/06 18:03:04 Waiting for host: 192.168.66.102:22
2018/08/06 18:03:07 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/08/06 18:03:15 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/08/06 18:03:23 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/08/06 18:03:28 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: connection refused. Sleeping 5s
2018/08/06 18:03:33 Connected to tcp://192.168.66.102:22
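The "Waiting for host" / "Problem with dial" lines above are the provider tooling polling each VM's SSH port until it accepts TCP connections, sleeping 5s between attempts; the timestamp format suggests a Go-style logger in the kubevirtci helper. A minimal shell sketch of the same poll-and-retry logic (host, port, and interval taken from the log; the nc-based probe is illustrative, not the actual implementation):

    #!/bin/bash
    # Poll a node's SSH port until it accepts TCP connections
    # (illustrative sketch of the retry loop seen in the log above).
    host=192.168.66.102
    port=22
    echo "Waiting for host: ${host}:${port}"
    until nc -z -w 2 "$host" "$port"; do
        echo "Problem with dial: ${host}:${port} not reachable. Sleeping 5s"
        sleep 5
    done
    echo "Connected to tcp://${host}:${port}"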
+ systemctl stop origin-node.service
+ rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/
++ docker ps -q
+ containers=
+ '[' -n '' ']'
++ docker ps -q -a
+ containers='8d985261fee2 3b39c685d71c 1fe63ccc7eb5 7fb562e6d373 7357d7573809 4cd9d31e59d6 3dd2b4e034df b8bd1bfbd6fe 2b04452ad9d2 5a8d8e270d07 5bcee7ba14b9 d317f6c94b21 249a96e5b5be bf8eede72349 37a4fab28e2d 08ce4e71eea8 e4e0f326f8c6 0a856ce6e727 64a97837a605 4e67e156ec7c 6b2d2d048d59 8b938906ea49 6572244ef7bc b44328453c0f f0bde1a8d259 61984dd36d89 d0b161b029c9 6b12e033271c 7cd73847cb28 c98ef990eea8'
+ '[' -n '8d985261fee2 3b39c685d71c 1fe63ccc7eb5 7fb562e6d373 7357d7573809 4cd9d31e59d6 3dd2b4e034df b8bd1bfbd6fe 2b04452ad9d2 5a8d8e270d07 5bcee7ba14b9 d317f6c94b21 249a96e5b5be bf8eede72349 37a4fab28e2d 08ce4e71eea8 e4e0f326f8c6 0a856ce6e727 64a97837a605 4e67e156ec7c 6b2d2d048d59 8b938906ea49 6572244ef7bc b44328453c0f f0bde1a8d259 61984dd36d89 d0b161b029c9 6b12e033271c 7cd73847cb28 c98ef990eea8' ']'
+ docker rm -f 8d985261fee2 3b39c685d71c 1fe63ccc7eb5 7fb562e6d373 7357d7573809 4cd9d31e59d6 3dd2b4e034df b8bd1bfbd6fe 2b04452ad9d2 5a8d8e270d07 5bcee7ba14b9 d317f6c94b21 249a96e5b5be bf8eede72349 37a4fab28e2d 08ce4e71eea8 e4e0f326f8c6 0a856ce6e727 64a97837a605 4e67e156ec7c 6b2d2d048d59 8b938906ea49 6572244ef7bc b44328453c0f f0bde1a8d259 61984dd36d89 d0b161b029c9 6b12e033271c 7cd73847cb28 c98ef990eea8
8d985261fee2
3b39c685d71c
1fe63ccc7eb5
7fb562e6d373
7357d7573809
4cd9d31e59d6
3dd2b4e034df
b8bd1bfbd6fe
2b04452ad9d2
5a8d8e270d07
5bcee7ba14b9
d317f6c94b21
249a96e5b5be
bf8eede72349
37a4fab28e2d
08ce4e71eea8
e4e0f326f8c6
0a856ce6e727
64a97837a605
4e67e156ec7c
6b2d2d048d59
8b938906ea49
6572244ef7bc
b44328453c0f
f0bde1a8d259
61984dd36d89
d0b161b029c9
6b12e033271c
7cd73847cb28
c98ef990eea8
2018/08/06 18:03:35 Waiting for host: 192.168.66.101:22
2018/08/06 18:03:38 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/08/06 18:03:46 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/08/06 18:03:54 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/08/06 18:04:02 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/08/06 18:04:10 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/08/06 18:04:15 Connected to tcp://192.168.66.101:22
+ inventory_file=/root/inventory
+ openshift_ansible=/root/openshift-ansible
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
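Here the script prepares /root/inventory for the scaleup playbook: the echo appends a [new_nodes] section header (the output redirection is not visible because bash xtrace does not print redirections), and the sed call inserts new_nodes into the [OSEv3:children] group list. Once the discovery loop below has appended node02, the touched parts of the inventory look roughly like this (the node02 line is verbatim from the trace; the other children entries are assumed):

    [OSEv3:children]
    new_nodes
    masters
    nodes
    etcd

    [new_nodes]
    node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102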
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=1.49 ms

--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.495/1.495/1.495/0.000 ms
+ '[' 0 -ne 0 ']'
+ nodes_found=true
+ set -e
+ echo '192.168.66.102 node02'
Found node02. Adding it to the inventory.
+ echo 'Found node02. Adding it to the inventory.'
+ echo 'node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102'
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable

--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms

+ '[' 1 -ne 0 ']'
+ break
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml

PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Evaluate oo_nodes_to_bootstrap] ******************************************
ok: [localhost] => (item=node02)
TASK [Add masters to oo_nodes_to_bootstrap] ************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)

PLAY [Ensure there are new_nodes] **********************************************
TASK [fail] ********************************************************************
skipping: [localhost]
TASK [fail] ********************************************************************
skipping: [localhost]
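The trace above is the provider's node-discovery loop: it probes node02 through node99 in order, registers every node that answers a ping, and breaks at the first address that does not. Reconstructed from the traced commands (the redirection targets are assumed, since xtrace does not echo them):

    #!/bin/bash
    # Reconstruction of the traced discovery loop.
    nodes_found=false
    for i in $(seq 2 100); do
        node=$(printf node%02d "$i")
        node_ip=$(printf 192.168.66.1%02d "$i")
        set +e
        ping "$node_ip" -c 1
        if [ $? -ne 0 ]; then
            break                      # first unreachable IP ends the scan
        fi
        nodes_found=true
        set -e
        echo "$node_ip $node" >> /etc/hosts                # assumed target
        echo "Found $node. Adding it to the inventory."
        echo "$node openshift_node_group_name=\"node-config-compute\" openshift_schedulable=true openshift_ip=$node_ip" >> /root/inventory   # assumed target
    done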
PLAY [Initialization Checkpoint Start] *****************************************
TASK [Set install initialization 'In Progress'] ********************************
ok: [node01]

PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Evaluate oo_nodes_to_bootstrap] ******************************************
ok: [localhost] => (item=node02)
TASK [Add masters to oo_nodes_to_bootstrap] ************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)
[WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config

PLAY [Ensure that all non-node hosts are accessible] ***************************
TASK [Gathering Facts] *********************************************************
ok: [node01]

PLAY [Initialize basic host facts] *********************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]
TASK [openshift_sanitize_inventory : include_tasks] ****************************
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02
TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] ***
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : debug] ************************************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : set_stats] ********************************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] ***
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02
TASK [openshift_sanitize_inventory : conditional_set_fact] *********************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : set_fact] *********************************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : conditional_set_fact] *********************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : Standardize on latest variable names] *****
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : Normalize openshift_release] **************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : include_tasks] ****************************
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02
TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : set_fact] *********************************
skipping: [node01] => (item=openshift_hosted_etcd_storage_kind)
skipping: [node02] => (item=openshift_hosted_etcd_storage_kind)
TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : ensure provider configuration variables are defined] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure removed web console extension variables are not set] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that web console port matches API server port] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : At least one master is schedulable] *******
skipping: [node01]
skipping: [node02]
TASK [Detecting Operating System from ostree_booted] ***************************
ok: [node02]
ok: [node01]
TASK [set openshift_deployment_type if unset] **********************************
skipping: [node01]
skipping: [node02]
TASK [check for node already bootstrapped] *************************************
ok: [node02]
ok: [node01]
TASK [initialize_facts set fact openshift_is_bootstrapped] *********************
ok: [node01]
ok: [node02]
TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] ***
ok: [node01]
ok: [node02]
TASK [Determine Atomic Host Docker Version] ************************************
skipping: [node01]
skipping: [node02]
TASK [assert atomic host docker version is 1.12 or later] **********************
skipping: [node01]
skipping: [node02]

PLAY [Retrieve existing master configs and validate] ***************************
TASK [openshift_control_plane : stat] ******************************************
ok: [node01]
TASK [openshift_control_plane : slurp] *****************************************
ok: [node01]
TASK [openshift_control_plane : set_fact] **************************************
ok: [node01]
TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] ***
ok: [node01]
TASK [openshift_control_plane : set_fact] **************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
skipping: [node01]

PLAY [Initialize special first-master variables] *******************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]

PLAY [Disable web console if required] *****************************************
TASK [set_fact] ****************************************************************
skipping: [node01]

PLAY [Setup yum repositories for all hosts] ************************************
TASK [rhel_subscribe : fail] ***************************************************
skipping: [node02]
TASK [rhel_subscribe : Install Red Hat Subscription manager] *******************
skipping: [node02]
TASK [rhel_subscribe : Is host already registered?] ****************************
skipping: [node02]
TASK [rhel_subscribe : Register host] ******************************************
skipping: [node02]
TASK [rhel_subscribe : fail] ***************************************************
skipping: [node02]
TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] ***********
skipping: [node02]
TASK [rhel_subscribe : Attach to OpenShift Pool] *******************************
skipping: [node02]
TASK [rhel_subscribe : Satellite preparation] **********************************
skipping: [node02]
TASK [openshift_repos : openshift_repos detect ostree] *************************
ok: [node02]
TASK [openshift_repos : Ensure libselinux-python is installed] *****************
ok: [node02]
TASK [openshift_repos : Remove openshift_additional.repo file] *****************
ok: [node02]
TASK [openshift_repos : Create any additional repos that are defined] **********
TASK [openshift_repos : include_tasks] *****************************************
skipping: [node02]
TASK [openshift_repos : include_tasks] *****************************************
included: /root/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02
TASK [openshift_repos : Configure origin gpg keys] *****************************
ok: [node02]
TASK [openshift_repos : Configure correct origin release repository] ***********
ok: [node02] => (item=/root/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2)
TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] ***
changed: [node02] => {
    "msg": "First run of openshift_repos"
}
TASK [openshift_repos : Record that openshift_repos already ran] ***************
ok: [node02]
RUNNING HANDLER [openshift_repos : refresh cache] ******************************
changed: [node02]

PLAY [Install packages necessary for installer] ********************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [Determine if chrony is installed] ****************************************
[WARNING]: Consider using the yum, dnf or zypper module rather than running
rpm. If you need to use command because yum, dnf or zypper is insufficient you
can add warn=False to this command task or set command_warnings=False in
ansible.cfg to get rid of this message.
changed: [node02]
TASK [Install ntp package] *****************************************************
skipping: [node02]
TASK [Start and enable ntpd/chronyd] *******************************************
changed: [node02]
TASK [Ensure openshift-ansible installer package deps are installed] ***********
ok: [node02] => (item=iproute)
ok: [node02] => (item=dbus-python)
ok: [node02] => (item=PyYAML)
ok: [node02] => (item=python-ipaddress)
ok: [node02] => (item=libsemanage-python)
ok: [node02] => (item=yum-utils)
ok: [node02] => (item=python-docker)

PLAY [Initialize cluster facts] ************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]
TASK [get openshift_current_version] *******************************************
ok: [node02]
ok: [node01]
TASK [set_fact openshift_portal_net if present on masters] *********************
ok: [node01]
ok: [node02]
TASK [Gather Cluster facts] ****************************************************
changed: [node02]
changed: [node01]
TASK [Set fact of no_proxy_internal_hostnames] *********************************
skipping: [node01]
skipping: [node02]
TASK [Initialize openshift.node.sdn_mtu] ***************************************
changed: [node02]
ok: [node01]

PLAY [Initialize etcd host variables] ******************************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]

PLAY [Determine openshift_version to configure on first master] ****************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [include_role : openshift_version] ****************************************
TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] ***
ok: [node01]
TASK [openshift_version : Set openshift_version to openshift_release if undefined] ***
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "msg": "openshift_pkg_version was not defined. Falling back to -3.10.0"
}
TASK [openshift_version : set_fact] ********************************************
ok: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : assert openshift_release in openshift_image_tag] *****
ok: [node01] => {
    "changed": false,
    "msg": "All assertions passed"
}
TASK [openshift_version : assert openshift_release in openshift_pkg_version] ***
ok: [node01] => {
    "changed": false,
    "msg": "All assertions passed"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_release": "3.10"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_image_tag": "v3.10.0-rc.0"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_pkg_version": "-3.10.0*"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_version": "3.10.0"
}
TASK [set openshift_version booleans (first master)] ***************************
ok: [node01]

PLAY [Set openshift_version for etcd, node, and master hosts] ******************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [set_fact] ****************************************************************
ok: [node02]
TASK [set openshift_version booleans (masters and nodes)] **********************
ok: [node02]

PLAY [Verify Requirements] *****************************************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [Run variable sanity checks] **********************************************
ok: [node01]
TASK [Validate openshift_node_groups and openshift_node_group_name] ************
ok: [node01]

PLAY [Initialization Checkpoint End] *******************************************
TASK [Set install initialization 'Complete'] ***********************************
ok: [node01]

PLAY [Validate node hostnames] *************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [Query DNS for IP address of node02] **************************************
ok: [node02]
TASK [Validate openshift_hostname when defined] ********************************
skipping: [node02]
TASK [Validate openshift_ip exists on node when defined] ***********************
skipping: [node02]
PLAY [Configure os_firewall] ***************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [os_firewall : Detecting Atomic Host Operating System] ********************
ok: [node02]
TASK [os_firewall : Set fact r_os_firewall_is_atomic] **************************
ok: [node02]
TASK [os_firewall : Fail - Firewalld is not supported on Atomic Host] **********
skipping: [node02]
TASK [os_firewall : Install firewalld packages] ********************************
skipping: [node02]
TASK [os_firewall : Ensure iptables services are not enabled] ******************
skipping: [node02] => (item=iptables)
skipping: [node02] => (item=ip6tables)
TASK [os_firewall : Wait 10 seconds after disabling iptables] ******************
skipping: [node02]
TASK [os_firewall : Start and enable firewalld service] ************************
skipping: [node02]
TASK [os_firewall : need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail] ***
skipping: [node02]
TASK [os_firewall : Restart polkitd] *******************************************
skipping: [node02]
TASK [os_firewall : Wait for polkit action to have been created] ***************
skipping: [node02]
TASK [os_firewall : Ensure firewalld service is not enabled] *******************
ok: [node02]
TASK [os_firewall : Wait 10 seconds after disabling firewalld] *****************
skipping: [node02]
TASK [os_firewall : Install iptables packages] *********************************
ok: [node02] => (item=iptables)
ok: [node02] => (item=iptables-services)
TASK [os_firewall : Start and enable iptables service] *************************
ok: [node02 -> node02] => (item=node02)
TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] ***
skipping: [node02]
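On this node the os_firewall role standardizes on iptables: firewalld stays disabled (the task reports ok, i.e. already disabled) and the iptables packages and service are installed and enabled. The equivalent manual steps, sketched with standard yum/systemd commands (the role uses Ansible modules, not these exact invocations):

    # What the os_firewall role effected on node02, done by hand
    systemctl disable firewalld          # "Ensure firewalld service is not enabled"
    yum install -y iptables iptables-services
    systemctl enable --now iptables      # "Start and enable iptables service"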
PLAY [oo_nodes_to_config] ******************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [container_runtime : Setup the docker-storage for overlay] ****************
skipping: [node02]
TASK [container_runtime : Create file system on extra volume device] ***********
TASK [container_runtime : Create mount entry for extra volume] *****************

PLAY [oo_nodes_to_config] ******************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [openshift_excluder : Install docker excluder - yum] **********************
ok: [node02]
TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]
TASK [openshift_excluder : set_fact] *******************************************
ok: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : Enable openshift excluder] **************************
skipping: [node02]
TASK [container_runtime : Getting current systemd-udevd exec command] **********
skipping: [node02]
TASK [container_runtime : Assure systemd-udevd.service.d directory exists] *****
skipping: [node02]
TASK [container_runtime : Create systemd-udevd override file] ******************
skipping: [node02]
TASK [container_runtime : Add enterprise registry, if necessary] ***************
skipping: [node02]
TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ******************
skipping: [node02]
TASK [container_runtime : Add https_proxy to /etc/atomic.conf] *****************
skipping: [node02]
TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ********************
skipping: [node02]
TASK [container_runtime : Get current installed Docker version] ****************
ok: [node02]
TASK [container_runtime : Error out if Docker pre-installed but too old] *******
skipping: [node02]
TASK [container_runtime : Error out if requested Docker is too old] ************
skipping: [node02]
TASK [container_runtime : Install Docker] **************************************
skipping: [node02]
TASK [container_runtime : Ensure docker.service.d directory exists] ************
ok: [node02]
TASK [container_runtime : Configure Docker service unit file] ******************
ok: [node02]
TASK [container_runtime : stat] ************************************************
ok: [node02]
TASK [container_runtime : Set registry params] *********************************
skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []})
skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []})
skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []})
TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] ***
skipping: [node02]
TASK [container_runtime : Set Proxy Settings] **********************************
skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''})
skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''})
skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''})
TASK [container_runtime : Set various Docker options] **************************
ok: [node02]
TASK [container_runtime : stat] ************************************************
ok: [node02]
TASK [container_runtime : Configure Docker Network OPTIONS] ********************
ok: [node02]
TASK [container_runtime : Detect if docker is already started] *****************
ok: [node02]
TASK [container_runtime : Start the Docker service] ****************************
ok: [node02]
TASK [container_runtime : set_fact] ********************************************
ok: [node02]
TASK [container_runtime : Check for docker_storage_path/overlay2] **************
ok: [node02]
TASK [container_runtime : Fixup SELinux permissions for docker] ****************
changed: [node02]
TASK [container_runtime : Ensure /var/lib/containers exists] *******************
ok: [node02]
TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ******
ok: [node02]
TASK [container_runtime : Check for credentials file for registry auth] ********
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth] *****
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] ***
skipping: [node02]
TASK [container_runtime : stat the docker data dir] ****************************
ok: [node02]
TASK [container_runtime : stop the current running docker] *********************
skipping: [node02]
TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] ***
skipping: [node02]
TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] ***
skipping: [node02]
TASK [container_runtime : restorecon the /var/lib/containers/docker] ***********
skipping: [node02]
TASK [container_runtime : Remove the old docker location] **********************
skipping: [node02]
TASK [container_runtime : Setup the link] **************************************
skipping: [node02]
TASK [container_runtime : start docker] ****************************************
skipping: [node02]
TASK [container_runtime : Fail if Atomic Host since this is an rpm request] ****
skipping: [node02]
TASK [container_runtime : Getting current systemd-udevd exec command] **********
skipping: [node02]
TASK [container_runtime : Assure systemd-udevd.service.d directory exists] *****
skipping: [node02]
TASK [container_runtime : Create systemd-udevd override file] ******************
skipping: [node02]
TASK [container_runtime : Add enterprise registry, if necessary] ***************
skipping: [node02]
TASK [container_runtime : Check that overlay is in the kernel] *****************
skipping: [node02]
TASK [container_runtime : Add overlay to modprobe.d] ***************************
skipping: [node02]
TASK [container_runtime : Manually modprobe overlay into the kernel] ***********
skipping: [node02]
TASK [container_runtime : Enable and start systemd-modules-load] ***************
skipping: [node02]
TASK [container_runtime : Install cri-o] ***************************************
skipping: [node02]
TASK [container_runtime : Remove CRI-O default configuration files] ************
skipping: [node02] => (item=/etc/cni/net.d/200-loopback.conf)
skipping: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf)
TASK [container_runtime : Create the CRI-O configuration] **********************
skipping: [node02]
TASK [container_runtime : Ensure CNI configuration directory exists] ***********
skipping: [node02]
TASK [container_runtime : Add iptables allow rules] ****************************
skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'})
TASK [container_runtime : Remove iptables rules] *******************************
TASK [container_runtime : Add firewalld allow rules] ***************************
skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'})
TASK [container_runtime : Remove firewalld allow rules] ************************
TASK [container_runtime : Configure the CNI network] ***************************
skipping: [node02]
TASK [container_runtime : Create /etc/sysconfig/crio-network] ******************
skipping: [node02]
TASK [container_runtime : Start the CRI-O service] *****************************
skipping: [node02]
TASK [container_runtime : Ensure /var/lib/containers exists] *******************
skipping: [node02]
TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ******
skipping: [node02]
TASK [container_runtime : Check for credentials file for registry auth] ********
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth] *****
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] ***
skipping: [node02]
TASK [container_runtime : stat the docker data dir] ****************************
skipping: [node02]
TASK [container_runtime : stop the current running docker] *********************
skipping: [node02]
TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] ***
skipping: [node02]
TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] ***
skipping: [node02]
TASK [container_runtime : restorecon the /var/lib/containers/docker] ***********
skipping: [node02]
TASK [container_runtime : Remove the old docker location] **********************
skipping: [node02]
TASK [container_runtime : Setup the link] **************************************
skipping: [node02]
TASK [container_runtime : start docker] ****************************************
skipping: [node02]
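Every CRI-O task in this play is skipped, which is consistent with the very first lines of the log: the job name openshift-3.10-release fails the .*-crio-.* match, so KUBEVIRT_PROVIDER is set to the plain Docker provider os-3.10.0. The selection logic, sketched from those regex checks (the crio provider name below is hypothetical):

    # Provider selection as implied by the regex checks at the top of the log
    if [[ "$JOB_NAME" =~ .*-crio-.* ]]; then
        export KUBEVIRT_PROVIDER=os-3.10.0-crio   # hypothetical crio variant
    else
        export KUBEVIRT_PROVIDER=os-3.10.0        # the path taken in this run
    fi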
PLAY [Determine openshift_version to configure on first master] ****************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [include_role : openshift_version] ****************************************
TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] ***
skipping: [node01]
TASK [openshift_version : Set openshift_version to openshift_release if undefined] ***
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : assert openshift_release in openshift_image_tag] *****
ok: [node01] => {
    "changed": false,
    "msg": "All assertions passed"
}
TASK [openshift_version : assert openshift_release in openshift_pkg_version] ***
ok: [node01] => {
    "changed": false,
    "msg": "All assertions passed"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_release": "3.10"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_image_tag": "v3.10.0-rc.0"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_pkg_version": "-3.10.0*"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_version": "3.10.0"
}
TASK [set openshift_version booleans (first master)] ***************************
ok: [node01]

PLAY [Set openshift_version for etcd, node, and master hosts] ******************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [set_fact] ****************************************************************
ok: [node02]
TASK [set openshift_version booleans (masters and nodes)] **********************
ok: [node02]

PLAY [Node Preparation Checkpoint Start] ***************************************
TASK [Set Node preparation 'In Progress'] **************************************
ok: [node01]

PLAY [Only target nodes that have not yet been bootstrapped] *******************
TASK [Gathering Facts] *********************************************************
ok: [localhost]
TASK [add_host] ****************************************************************
skipping: [localhost] => (item=node02)
ok: [localhost] => (item=node01)

PLAY [Disable excluders] *******************************************************
TASK [openshift_excluder : Detecting Atomic Host Operating System] *************
ok: [node02]
TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] ***
ok: [node02] => {
    "r_openshift_excluder_enable_docker_excluder": true
}
TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] ***
ok: [node02] => {
    "r_openshift_excluder_enable_openshift_excluder": true
}
TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] ***
skipping: [node02]
TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] ***
skipping: [node02]
TASK [openshift_excluder : Include main action task file] **********************
included: /root/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02
TASK [openshift_excluder : Get available excluder version] *********************
skipping: [node02]
TASK [openshift_excluder : Fail when excluder package is not found] ************
skipping: [node02]
TASK [openshift_excluder : Set fact excluder_version] **************************
skipping: [node02]
TASK [openshift_excluder : origin-docker-excluder version detected] ************
skipping: [node02]
TASK [openshift_excluder : Printing upgrade target version] ********************
skipping: [node02]
TASK [openshift_excluder : Check the available origin-docker-excluder version is at most of the upgrade target version] ***
skipping: [node02]
TASK [openshift_excluder : Get available excluder version] *********************
skipping: [node02]
TASK [openshift_excluder : Fail when excluder package is not found] ************
skipping: [node02]
TASK [openshift_excluder : Set fact excluder_version] **************************
skipping: [node02]
TASK [openshift_excluder : origin-excluder version detected] *******************
skipping: [node02]
TASK [openshift_excluder : Printing upgrade target version] ********************
skipping: [node02]
TASK [openshift_excluder : Check the available origin-excluder version is at most of the upgrade target version] ***
skipping: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : disable docker excluder] ****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : disable openshift excluder] *************************
changed: [node02]
TASK [openshift_excluder : Install docker excluder - yum] **********************
skipping: [node02]
TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]
TASK [openshift_excluder : set_fact] *******************************************
skipping: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : Enable openshift excluder] **************************
changed: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : disable docker excluder] ****************************
skipping: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : disable openshift excluder] *************************
changed: [node02]

PLAY [Configure nodes] *********************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [openshift_cloud_provider : Set cloud provider facts] *********************
skipping: [node02]
TASK [openshift_cloud_provider : Create cloudprovider config dir] **************
skipping: [node02]
TASK [openshift_cloud_provider : include the defined cloud provider files] *****
skipping: [node02]
TASK [openshift_node : fail] ***************************************************
skipping: [node02]
TASK [openshift_node : Check for NetworkManager service] ***********************
ok: [node02]
TASK [openshift_node : Set fact using_network_manager] *************************
ok: [node02]
TASK [openshift_node : Install dnsmasq] ****************************************
ok: [node02]
TASK [openshift_node : ensure origin/node directory exists] ********************
changed: [node02] => (item=/etc/origin)
changed: [node02] => (item=/etc/origin/node)
TASK [openshift_node : Install NetworkManager during node_bootstrap provisioning] ***
skipping: [node02]
TASK [openshift_node : Install network manager dispatch script] ****************
skipping: [node02]
TASK [openshift_node : Install dnsmasq configuration] **************************
ok: [node02]
TASK [openshift_node : Deploy additional dnsmasq.conf] *************************
skipping: [node02]
TASK [openshift_node : Enable dnsmasq] *****************************************
ok: [node02]
TASK [openshift_node : Install network manager dispatch script] ****************
ok: [node02]
TASK [openshift_node : Add iptables allow rules] *******************************
ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'})
ok: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'})
ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'})
ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'})
ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'})
skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'})
skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'})
skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'})
TASK [openshift_node : Remove iptables rules] **********************************
TASK [openshift_node : Add firewalld allow rules] ******************************
skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'})
skipping: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'})
skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'})
skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'})
skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'})
skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'})
skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'})
skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'})
TASK [openshift_node : Remove firewalld allow rules] ***************************
TASK [openshift_node : Checking for journald.conf] *****************************
ok: [node02]
TASK [openshift_node : Create journald persistence directories] ****************
ok: [node02]
TASK [openshift_node : Update journald setup] **********************************
ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'})
ok: [node02] => (item={u'var': u'Compress', u'val': True})
ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'})
ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'})
ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000})
ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'})
ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'})
ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'})
ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'})
ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'})
ok: [node02] => (item={u'var': u'ForwardToSyslog', u'val': False})
ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False})
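The "Update journald setup" task writes each var/val pair above into the node's journald configuration so logs are persisted and rate-limited. Rendered as a configuration fragment (values taken from the task items; the file location, section header, and boolean spelling are assumed):

    # /etc/systemd/journald.conf (fragment, assumed rendering)
    [Journal]
    Storage=persistent
    Compress=yes
    SyncIntervalSec=1s
    RateLimitInterval=1s
    RateLimitBurst=10000
    SystemMaxUse=8G
    SystemKeepFree=20%
    SystemMaxFileSize=10M
    MaxRetentionSec=1month
    MaxFileSec=1day
    ForwardToSyslog=no
    ForwardToWall=no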
TASK [openshift_node : Restart journald] ***************************************
skipping: [node02]
TASK [openshift_node : Disable swap] *******************************************
ok: [node02]
TASK [openshift_node : Install node, clients, and conntrack packages] **********
ok: [node02] => (item={u'name': u'origin-node-3.10.0*'})
ok: [node02] => (item={u'name': u'origin-clients-3.10.0*'})
ok: [node02] => (item={u'name': u'conntrack-tools'})
TASK [openshift_node : Restart cri-o] ******************************************
skipping: [node02]
TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] ***
changed: [node02]
TASK [openshift_node : sysctl] *************************************************
ok: [node02]
TASK [openshift_node : Check for credentials file for registry auth] ***********
skipping: [node02]
TASK [openshift_node : Create credentials for registry auth] *******************
skipping: [node02]
TASK [openshift_node : Create credentials for registry auth (alternative)] *****
skipping: [node02]
TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] ***
skipping: [node02]
TASK [openshift_node : Check that node image is present] ***********************
changed: [node02]
TASK [openshift_node : Pre-pull node image] ************************************
skipping: [node02]
TASK [openshift_node : Copy node script to the node] ***************************
ok: [node02]
TASK [openshift_node : Install Node service file] ******************************
ok: [node02]
TASK [openshift_node : Ensure old system path is set] **************************
skipping: [node02] => (item=/etc/origin/openvswitch)
skipping: [node02] => (item=/var/lib/kubelet)
skipping: [node02] => (item=/opt/cni/bin)
TASK [openshift_node : Check status of node image pre-pull] ********************
skipping: [node02]
TASK [openshift_node : Copy node container image to ostree storage] ************
skipping: [node02]
TASK [openshift_node : Install or Update node system container] ****************
skipping: [node02]
TASK [openshift_node : Restart network manager to ensure networking configuration is in place] ***
skipping: [node02]
TASK [openshift_node : Configure Node settings] ********************************
ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS='})
ok: [node02] => (item={u'regex': u'^DEBUG_LOGLEVEL=', u'line': u'DEBUG_LOGLEVEL=2'})
ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.10.0-rc.0'})
TASK [openshift_node : Configure Proxy Settings] *******************************
skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='})
skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='})
skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'})
TASK [openshift_node : file] ***************************************************
skipping: [node02]
TASK [openshift_node : Create the Node config] *********************************
changed: [node02]
TASK [openshift_node : Configure Node Environment Variables] *******************
TASK [openshift_node : Ensure the node static pod directory exists] ************
changed: [node02]
TASK [openshift_node : Configure AWS Cloud Provider Settings] ******************
skipping: [node02] => (item=None)
skipping: [node02] => (item=None)
skipping: [node02]
TASK [openshift_node : Check status of node image pre-pull] ********************
skipping: [node02]
TASK [openshift_node : Install NFS storage plugin dependencies] ****************
ok: [node02]
TASK [openshift_node : Check for existence of nfs sebooleans] ******************
ok: [node02] => (item=virt_use_nfs)
ok: [node02] => (item=virt_sandbox_use_nfs)
TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] ***
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-06 18:11:31.003309', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.018381', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-08-06 18:11:30.984928', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-06 18:11:32.181796', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.015238', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-08-06 18:11:32.166558', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] ***
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-06 18:11:31.003309', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.018381', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-08-06 18:11:30.984928', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-06 18:11:32.181796', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.015238', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-08-06 18:11:32.166558', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Install GlusterFS storage plugin dependencies] **********
ok: [node02]
TASK [openshift_node : Check for existence of fusefs sebooleans] ***************
ok: [node02] => (item=virt_use_fusefs)
ok: [node02] => (item=virt_sandbox_use_fusefs)
TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] ***
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-06 18:11:37.957683', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.017398', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-08-06 18:11:37.940285', '_ansible_ignore_errors': None, 'failed': False})
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-06 18:11:39.109449', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.019644', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-08-06 18:11:39.089805', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] ***
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-06 18:11:37.957683', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.017398', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-08-06 18:11:37.940285', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-08-06 18:11:39.109449', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.019644', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-08-06 18:11:39.089805', '_ansible_ignore_errors': None, 'failed': False})
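The same pattern repeats for each storage plugin: getsebool reports whether the virt_use_* / virt_sandbox_use_* booleans are already on, and the set-seboolean tasks only change anything when they are not (here everything was already on, so nothing changed; the python-3 variants appear to be skipped on this python-2 host). Done by hand, the equivalent would be the standard SELinux tooling, with boolean names taken from the log:

    # Persistently enable the SELinux booleans the node role checks
    setsebool -P virt_use_nfs on
    setsebool -P virt_sandbox_use_nfs on
    setsebool -P virt_use_fusefs on
    setsebool -P virt_sandbox_use_fusefs on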
TASK [openshift_node : Install Ceph storage plugin dependencies] ***************
ok: [node02]
TASK [openshift_node : Install iSCSI storage plugin dependencies] **************
ok: [node02] => (item=iscsi-initiator-utils)
ok: [node02] => (item=device-mapper-multipath)
TASK [openshift_node : restart services] ***************************************
ok: [node02] => (item=multipathd)
ok: [node02] => (item=rpcbind)
ok: [node02] => (item=iscsid)
TASK [openshift_node : Template multipath configuration] ***********************
changed: [node02]
TASK [openshift_node : Enable and start multipath] *****************************
changed: [node02]
TASK [tuned : Check for tuned package] *****************************************
ok: [node02]
TASK [tuned : Set tuned OpenShift variables] ***********************************
ok: [node02]
TASK [tuned : Ensure directory structure exists] *******************************
ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1533205381.9317749, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1533205381.9317749})
ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1533205381.9327748})
ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1533205381.9327748})
skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1533205381.9327748, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'})
skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1533205381.9327748, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'})
skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1533205381.9327748, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'})
skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': u's0', 'seuser': u'unconfined_u', 'serole': u'object_r', 'ctime': 1533205381.9327748, 'state': u'file', 'gid': 0, 'mode': u'0644', 'mtime': 1533205381.9327748, 'owner': u'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': u'admin_home_t'})
TASK [tuned : Ensure files are populated from templates] ***********************
skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1533205381.9317749, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1533205381.9317749})
skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1533205381.9327748})
skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1533205381.9327748})
ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1533205381.9327748, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'})
ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1533205381.9327748, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'})
ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1533205381.9327748, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'})
ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1533205381.9327748, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1533205381.9327748, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'})
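Note: the tuned tasks above lay down profile directories under the templates root and populate them; the activation step that follows is roughly equivalent to (a sketch; the profile name openshift-node is assumed from the template paths above):
  tuned-adm recommend               # show the profile tuned would pick via recommend.conf
  tuned-adm profile openshift-node  # activate; this rewrites /etc/tuned/active_profile
  systemctl restart tuned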
TASK [tuned : Make tuned use the recommended tuned profile on restart] *********
changed: [node02] => (item=/etc/tuned/active_profile)
changed: [node02] => (item=/etc/tuned/profile_mode)
TASK [tuned : Restart tuned service] *******************************************
changed: [node02]
TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] *******
ok: [node02]
TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] ***
PLAY [node bootstrap config] ***************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [openshift_node : install needed rpm(s)] **********************************
ok: [node02] => (item=origin-node)
ok: [node02] => (item=origin-docker-excluder)
ok: [node02] => (item=ansible)
ok: [node02] => (item=bash-completion)
ok: [node02] => (item=docker)
ok: [node02] => (item=haproxy)
ok: [node02] => (item=dnsmasq)
ok: [node02] => (item=ntp)
ok: [node02] => (item=logrotate)
ok: [node02] => (item=httpd-tools)
ok: [node02] => (item=bind-utils)
ok: [node02] => (item=firewalld)
ok: [node02] => (item=libselinux-python)
ok: [node02] => (item=conntrack-tools)
ok: [node02] => (item=openssl)
ok: [node02] => (item=iproute)
ok: [node02] => (item=python-dbus)
ok: [node02] => (item=PyYAML)
ok: [node02] => (item=yum-utils)
ok: [node02] => (item=glusterfs-fuse)
ok: [node02] => (item=device-mapper-multipath)
ok: [node02] => (item=nfs-utils)
ok: [node02] => (item=cockpit-ws)
ok: [node02] => (item=cockpit-system)
ok: [node02] => (item=cockpit-bridge)
ok: [node02] => (item=cockpit-docker)
ok: [node02] => (item=iscsi-initiator-utils)
ok: [node02] => (item=ceph-common)
TASK [openshift_node : create the directory for node] **************************
skipping: [node02]
TASK [openshift_node : laydown systemd override] *******************************
skipping: [node02]
TASK [openshift_node : update the sysconfig to have necessary variables] *******
ok: [node02] => (item={u'regexp': u'^KUBECONFIG=.*', u'line': u'KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig'})
TASK [openshift_node : Configure AWS Cloud Provider Settings] ******************
skipping: [node02] => (item=None)
skipping: [node02] => (item=None)
skipping: [node02]
TASK [openshift_node : disable origin-node service] ****************************
changed: [node02] => (item=origin-node.service)
TASK [openshift_node : Check for RPM generated config marker file .config_managed] ***
ok: [node02]
TASK [openshift_node : create directories for bootstrapping] *******************
ok: [node02] => (item=/root/openshift_bootstrap)
changed: [node02] => (item=/var/lib/origin/openshift.local.config)
changed: [node02] => (item=/var/lib/origin/openshift.local.config/node)
ok: [node02] => (item=/etc/docker/certs.d/docker-registry.default.svc:5000)
TASK [openshift_node : laydown the bootstrap.yml file for on boot configuration] ***
ok: [node02]
TASK [openshift_node : Create a symlink to the node client CA for the docker registry] ***
ok: [node02]
TASK [openshift_node : Remove RPM generated config files if present] ***********
skipping: [node02] => (item=master)
skipping: [node02] => (item=.config_managed)
TASK [openshift_node : find all files in /etc/origin/node so we can remove them] ***
skipping: [node02]
TASK [openshift_node : Remove everything except the resolv.conf required for node] ***
skipping: [node02]
TASK [openshift_node_group : create node config template] **********************
changed: [node02]
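Note: the bootstrap play above swaps the node onto a bootstrap kubeconfig before first start. Condensed to shell, the sysconfig edit and service handling amount to (a sketch; the sysconfig path is assumed, the regexp/line values are taken from the task output above):
  sed -i 's|^KUBECONFIG=.*|KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig|' /etc/sysconfig/origin-node
  systemctl disable origin-node.service   # left disabled here; bootstrapping starts it later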
TASK [openshift_node_group : remove existing node config] **********************
changed: [node02]
TASK [openshift_node_group : Ensure required directories are present] **********
ok: [node02] => (item=/etc/origin/node/pods)
changed: [node02] => (item=/etc/origin/node/certificates)
TASK [openshift_node_group : Update the sysconfig to group "node-config-compute"] ***
changed: [node02]
TASK [set_fact] ****************************************************************
ok: [node02]
PLAY [Re-enable excluder if it was previously enabled] *************************
TASK [openshift_excluder : Detecting Atomic Host Operating System] *************
ok: [node02]
TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] ***
ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true }
TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] ***
ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true }
TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] ***
skipping: [node02]
TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] ***
skipping: [node02]
TASK [openshift_excluder : Include main action task file] **********************
included: /root/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02
TASK [openshift_excluder : Install docker excluder - yum] **********************
skipping: [node02]
TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]
TASK [openshift_excluder : set_fact] *******************************************
skipping: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : Enable openshift excluder] **************************
changed: [node02]
PLAY [Node Preparation Checkpoint End] *****************************************
TASK [Set Node preparation 'Complete'] *****************************************
ok: [node01]
PLAY [Distribute bootstrap and start nodes] ************************************
TASK [openshift_node : Gather node information] ********************************
changed: [node02]
ok: [node01]
TASK [openshift_node : Copy master bootstrap config locally] *******************
ok: [node02 -> node01]
TASK [openshift_node : Distribute bootstrap kubeconfig if one does not exist] ***
ok: [node01]
changed: [node02]
TASK [openshift_node : Start and enable node for bootstrapping] ****************
changed: [node02]
changed: [node01]
TASK [openshift_node : Get node logs] ******************************************
skipping: [node02]
skipping: [node01]
TASK [openshift_node : debug] **************************************************
skipping: [node02]
skipping: [node01]
TASK [openshift_node : fail] ***************************************************
skipping: [node02]
skipping: [node01]
PLAY [Approve any pending CSR requests from inventory nodes] *******************
TASK [Dump all candidate bootstrap hostnames] **********************************
ok: [node01] => { "msg": [ "node02", "node01" ] }
TASK [Find all hostnames for bootstrapping] ************************************
ok: [node01]
TASK [Dump the bootstrap hostnames] ********************************************
ok: [node01] => { "msg": [ "node02", "node01" ] }
TASK [Approve bootstrap nodes] *************************************************
changed: [node01]
TASK [Get CSRs] ****************************************************************
skipping: [node01]
TASK [Report approval errors] **************************************************
skipping: [node01]
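Note: "Approve bootstrap nodes" batch-approves the certificate signing requests the new node raised while bootstrapping. Done by hand, that step is roughly (a sketch, not the playbook's exact module call):
  oc get csr -o name | xargs -r oc adm certificate approve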
PLAY [Ensure any inventory labels are applied to the nodes] ********************
TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]
TASK [openshift_manage_node : Wait for master API to become available before proceeding] ***
skipping: [node02]
TASK [openshift_manage_node : Wait for Node Registration] **********************
ok: [node02 -> node01]
ok: [node01 -> node01]
TASK [openshift_manage_node : include_tasks] ***********************************
included: /root/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02, node01
TASK [openshift_manage_node : Set node schedulability] *************************
ok: [node01 -> node01]
ok: [node02 -> node01]
TASK [openshift_manage_node : include_tasks] ***********************************
included: /root/openshift-ansible/roles/openshift_manage_node/tasks/set_default_node_role.yml for node02, node01
TASK [openshift_manage_node : Retrieve nodes that are marked with the infra selector or the legacy infra selector] ***
ok: [node02 -> node01]
TASK [openshift_manage_node : Label infra or legacy infra nodes with the new role label] ***
TASK [openshift_manage_node : Retrieve non-infra, non-master nodes that are not yet labeled compute] ***
ok: [node02 -> node01]
TASK [openshift_manage_node : label non-master non-infra nodes compute] ********
TASK [openshift_manage_node : Label all-in-one master as a compute node] *******
skipping: [node02]
PLAY RECAP *********************************************************************
localhost : ok=30 changed=0 unreachable=0 failed=0
node01 : ok=71 changed=3 unreachable=0 failed=0
node02 : ok=155 changed=33 unreachable=0 failed=0
INSTALLER STATUS ***************************************************************
Initialization : Complete (0:03:28)
Node Preparation : Complete (0:04:12)
+ set +e
+ crio=false
+ grep crio /root/inventory
+ '[' 1 -eq 0 ']'
+ set -e
+ cat
+ ansible-playbook -i /root/inventory post_deployment_configuration --extra-vars=crio=false
PLAY [nodes, new_nodes] ********************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]
TASK [replace] *****************************************************************
skipping: [node01]
skipping: [node02]
TASK [replace] *****************************************************************
skipping: [node01]
skipping: [node02]
TASK [service] *****************************************************************
skipping: [node01]
skipping: [node02]
PLAY RECAP *********************************************************************
node01 : ok=1 changed=0 unreachable=0 failed=0
node02 : ok=1 changed=0 unreachable=0 failed=0
+ set -x
+ /usr/bin/oc get nodes --no-headers
node01 Ready compute,infra,master 4d v1.10.0+b81c8f8
node02 Ready compute 34s v1.10.0+b81c8f8
+ os_rc=0
+ retry_counter=0
+ [[ 0 -lt 20 ]]
+ [[ 0 -ne 0 ]]
+ /usr/bin/oc create -f /tmp/local-volume.yaml
storageclass.storage.k8s.io "local" created
configmap "local-storage-config" created
clusterrolebinding.rbac.authorization.k8s.io "local-storage-provisioner-pv-binding" created
clusterrole.rbac.authorization.k8s.io "local-storage-provisioner-node-clusterrole" created
clusterrolebinding.rbac.authorization.k8s.io "local-storage-provisioner-node-binding" created
role.rbac.authorization.k8s.io "local-storage-provisioner-jobs-role" created
rolebinding.rbac.authorization.k8s.io "local-storage-provisioner-jobs-rolebinding" created
serviceaccount "local-storage-admin" created
daemonset.extensions "local-volume-provisioner" created
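Note: the os_rc/retry_counter variables above implement a bounded retry around oc create. The idiom, reconstructed as a sketch (the back-off between attempts is assumed):
  retry_counter=0
  os_rc=1
  while [[ ${retry_counter} -lt 20 ]] && [[ ${os_rc} -ne 0 ]]; do
      /usr/bin/oc create -f /tmp/local-volume.yaml
      os_rc=$?
      retry_counter=$((retry_counter + 1))
      if [[ ${os_rc} -ne 0 ]]; then sleep 5; fi   # assumed pause before retrying
  done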
"local-storage-provisioner-pv-binding" created clusterrole.rbac.authorization.k8s.io "local-storage-provisioner-node-clusterrole" created clusterrolebinding.rbac.authorization.k8s.io "local-storage-provisioner-node-binding" created role.rbac.authorization.k8s.io "local-storage-provisioner-jobs-role" created rolebinding.rbac.authorization.k8s.io "local-storage-provisioner-jobs-rolebinding" created serviceaccount "local-storage-admin" created daemonset.extensions "local-volume-provisioner" created Sending file modes: C0755 110489328 oc Sending file modes: C0600 5645 admin.kubeconfig Cluster "node01:8443" set. Cluster "node01:8443" set. + set +e + kubectl get nodes --no-headers + cluster/kubectl.sh get nodes --no-headers node01 Ready compute,infra,master 4d v1.10.0+b81c8f8 node02 Ready compute 58s v1.10.0+b81c8f8 + kubectl_rc=0 + '[' 0 -ne 0 ']' ++ kubectl get nodes --no-headers ++ grep NotReady ++ cluster/kubectl.sh get nodes --no-headers + '[' -n '' ']' + set -e + echo 'Nodes are ready:' Nodes are ready: + kubectl get nodes + cluster/kubectl.sh get nodes NAME STATUS ROLES AGE VERSION node01 Ready compute,infra,master 4d v1.10.0+b81c8f8 node02 Ready compute 59s v1.10.0+b81c8f8 + make cluster-sync ./cluster/build.sh Building ... Untagged: localhost:33211/kubevirt/virt-controller:devel Untagged: localhost:33211/kubevirt/virt-controller@sha256:9a3dcb5db8d8c9f6cf489d6d0ebeb2a8714582b11134747ad54d44720d1e15db Deleted: sha256:2c510cb3e1ed48808f5da5dd051c741bab03c6bfd310226477d83dc6165fbe24 Deleted: sha256:d6e65c2b871bb32b2e10a03d8c0a8bc58e197322860b4345964eb7c85a3adf3c Deleted: sha256:2030cbc32421a315fe2874aa68ae39acb3628cb87ec9635d9b82b0308bf76d8d Deleted: sha256:0e4bae84a64907715b8f7dc60cd780d1938fc86fcd08edefda7eaefd8190973d Untagged: localhost:33211/kubevirt/virt-launcher:devel Untagged: localhost:33211/kubevirt/virt-launcher@sha256:7326fd5803887586390bd1c93e46e003257818b934eca48d6e8731b75dad0cd7 Deleted: sha256:63647685dc27751fd667e2c4887d63ec8ff92212fbb4ee6fa4189d905afa5473 Deleted: sha256:843c93b9f5161ed7256f5dbfdfa5b265e0cf08e75e70cedefd72c01caecff7e8 Deleted: sha256:477a5ca2d048ab0600708efcf5ea476195351e805617a3612a45f715c5dbfc1b Deleted: sha256:e9b6956d5fb1ddb89d089c2201a914249ea855aa86a738684ac7b438ea56a88f Deleted: sha256:bc0b8b00426ca0ea38777366dffbbb6d3c87aa164970c74c36493ef4edc2b9ea Deleted: sha256:e1143a2b37e3b93aba6c213ab2d8fb84a08d94bb37b514a5040ca45daf90fc62 Deleted: sha256:618da39eae8bd9a9101b77d593da91ffd3d9cfb46c15d0385fee8443277e57ca Deleted: sha256:967c794aefcd8d210db7c3a7b85148a25518c655d5fdd1475f1cee435100b1ba Deleted: sha256:1a5a8c34eea1e5ca4077b4803d35ca5fbfc0e0b6dfea29d6da78723d8cc350b7 Deleted: sha256:56dbedd1add9f1c85b981c370ee3ce420bf1005fd77651170178ac50d68fd9fa Deleted: sha256:51735f4030e0aa8f828993354889f6856a1008facb1ab25c5b5a72e6a8509d63 Deleted: sha256:3b2a38de0f465c140e1f77f5456d382c81bff8c38dce955674bbe544b3a3176a Untagged: localhost:33211/kubevirt/virt-handler:devel Untagged: localhost:33211/kubevirt/virt-handler@sha256:ad4620f9dc3c540be2875b097162310337061013dcd11ae3a35a061438feadcb Deleted: sha256:8eea98cd85563b6f9e8946015cde417dc74e2819070250c7436d75ebd1bdca66 Deleted: sha256:09b013359d8dd99e4ea7a713ecc536545a37ce72ac8a1aa404bde853d908057e Deleted: sha256:82baac51ce25cad6f16821caa4c7c350673f4669d4e4485a01750ea9042d08b9 Deleted: sha256:4df4e4ecd655f5fd9a20f294333758bdf388bacb88eca17ed3d2d28c4803284b Untagged: localhost:33211/kubevirt/virt-api:devel Untagged: 
+ make cluster-sync
./cluster/build.sh
Building ...
Untagged: localhost:33211/kubevirt/virt-controller:devel
Untagged: localhost:33211/kubevirt/virt-controller@sha256:9a3dcb5db8d8c9f6cf489d6d0ebeb2a8714582b11134747ad54d44720d1e15db
Deleted: sha256:2c510cb3e1ed48808f5da5dd051c741bab03c6bfd310226477d83dc6165fbe24
Deleted: sha256:d6e65c2b871bb32b2e10a03d8c0a8bc58e197322860b4345964eb7c85a3adf3c
Deleted: sha256:2030cbc32421a315fe2874aa68ae39acb3628cb87ec9635d9b82b0308bf76d8d
Deleted: sha256:0e4bae84a64907715b8f7dc60cd780d1938fc86fcd08edefda7eaefd8190973d
Untagged: localhost:33211/kubevirt/virt-launcher:devel
Untagged: localhost:33211/kubevirt/virt-launcher@sha256:7326fd5803887586390bd1c93e46e003257818b934eca48d6e8731b75dad0cd7
Deleted: sha256:63647685dc27751fd667e2c4887d63ec8ff92212fbb4ee6fa4189d905afa5473
Deleted: sha256:843c93b9f5161ed7256f5dbfdfa5b265e0cf08e75e70cedefd72c01caecff7e8
Deleted: sha256:477a5ca2d048ab0600708efcf5ea476195351e805617a3612a45f715c5dbfc1b
Deleted: sha256:e9b6956d5fb1ddb89d089c2201a914249ea855aa86a738684ac7b438ea56a88f
Deleted: sha256:bc0b8b00426ca0ea38777366dffbbb6d3c87aa164970c74c36493ef4edc2b9ea
Deleted: sha256:e1143a2b37e3b93aba6c213ab2d8fb84a08d94bb37b514a5040ca45daf90fc62
Deleted: sha256:618da39eae8bd9a9101b77d593da91ffd3d9cfb46c15d0385fee8443277e57ca
Deleted: sha256:967c794aefcd8d210db7c3a7b85148a25518c655d5fdd1475f1cee435100b1ba
Deleted: sha256:1a5a8c34eea1e5ca4077b4803d35ca5fbfc0e0b6dfea29d6da78723d8cc350b7
Deleted: sha256:56dbedd1add9f1c85b981c370ee3ce420bf1005fd77651170178ac50d68fd9fa
Deleted: sha256:51735f4030e0aa8f828993354889f6856a1008facb1ab25c5b5a72e6a8509d63
Deleted: sha256:3b2a38de0f465c140e1f77f5456d382c81bff8c38dce955674bbe544b3a3176a
Untagged: localhost:33211/kubevirt/virt-handler:devel
Untagged: localhost:33211/kubevirt/virt-handler@sha256:ad4620f9dc3c540be2875b097162310337061013dcd11ae3a35a061438feadcb
Deleted: sha256:8eea98cd85563b6f9e8946015cde417dc74e2819070250c7436d75ebd1bdca66
Deleted: sha256:09b013359d8dd99e4ea7a713ecc536545a37ce72ac8a1aa404bde853d908057e
Deleted: sha256:82baac51ce25cad6f16821caa4c7c350673f4669d4e4485a01750ea9042d08b9
Deleted: sha256:4df4e4ecd655f5fd9a20f294333758bdf388bacb88eca17ed3d2d28c4803284b
Untagged: localhost:33211/kubevirt/virt-api:devel
Untagged: localhost:33211/kubevirt/virt-api@sha256:97465db7470a5a1b56dd20da104b8cc956e80c674d78ad84faba89d226978206
Deleted: sha256:1e7a2287206bb2d32557fd148a670cf147edb7533308aa14edb63b6038f3591d
Deleted: sha256:d079c109fadd2b2e2953b3262bced681211e5602c38c68d82a12d4fbe708076d
Deleted: sha256:51f7444b01d84e2b44f38c25a3bf414134f991e7f1d80fb13dc3c50d97bd3594
Deleted: sha256:bd1e45de30974ce276f3f84452b6ea6d8ead6faec55a80cb4eb662d42a69b729
Untagged: localhost:33211/kubevirt/subresource-access-test:devel
Untagged: localhost:33211/kubevirt/subresource-access-test@sha256:f866c455b753ba059da5b038740b405f891666ab47ecbd798cd779b15de7c527
Deleted: sha256:10c9c5d5d797367ec4b2c803b5cb16dfcf746f060a83dec114f081d2739619ce
Deleted: sha256:aaf21ab7a64692dcb1ed3dc9cf8d3a323888d56720db203a5eb9884b33ed9515
Deleted: sha256:b028d8bec9de94c1d5892b0ab2a547b04bcfbf1e3c0c71a45b425c70ad678f48
Deleted: sha256:2d95ae47cc8fe874c1868220ff5198aabd299d5777dcbec9698d7c131c587e4f
Untagged: localhost:33211/kubevirt/example-hook-sidecar:devel
Untagged: localhost:33211/kubevirt/example-hook-sidecar@sha256:2190812efcc9f157f8b30de28814063963c8a75654a339d635753a20468f875f
Deleted: sha256:75cd4c985e2844a44216fa6324e2843b1c28843d6d3762eaa3fdb350b85d9565
Deleted: sha256:00edfc04c9c70e2f632c25a92cb94a468112e27aea5642cd4d22af353285f3a3
Deleted: sha256:69ae59b7016ce27ad0d5ff423bff59141b8a359c6fac80908ed35af493bf529d
Deleted: sha256:b9f9fb01936961cf5a55ef6411c5a811d2f546bcbb9d8d1362390aee67c986d5
Sending build context to Docker daemon 5.632 kB
Step 1/12 : FROM fedora:28 ---> cc510acfcd70
Step 2/12 : ENV LIBVIRT_VERSION 4.2.0 ---> Using cache ---> 2c20d91d48e6
Step 3/12 : RUN curl --output /etc/yum.repos.d/fedora-virt-preview.repo https://fedorapeople.org/groups/virt/virt-preview/fedora-virt-preview.repo ---> Using cache ---> b7c63cc1ff31
Step 4/12 : RUN dnf -y install libvirt-devel-${LIBVIRT_VERSION} make git mercurial sudo gcc findutils gradle rsync-daemon rsync qemu-img protobuf-compiler && dnf -y clean all ---> Using cache ---> 784786580231
Step 5/12 : ENV GIMME_GO_VERSION 1.10 ---> Using cache ---> e6c3df647782
Step 6/12 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> aa4d2ed226ed
Step 7/12 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 9d8471eeeed9
Step 8/12 : ADD rsyncd.conf /etc/rsyncd.conf ---> Using cache ---> 35522f6166d0
Step 9/12 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/mattn/goveralls && go get -u github.com/Masterminds/glide && go get golang.org/x/tools/cmd/goimports && git clone https://github.com/mvdan/sh.git $GOPATH/src/mvdan.cc/sh && cd /go/src/mvdan.cc/sh/cmd/shfmt && git checkout v2.5.0 && go get mvdan.cc/sh/cmd/shfmt && go install && go get -u github.com/golang/mock/gomock && go get -u github.com/rmohr/mock/mockgen && go get -u github.com/rmohr/go-swagger-utils/swagger-doc && go get -u github.com/onsi/ginkgo/ginkgo && go get -u -d k8s.io/code-generator/cmd/deepcopy-gen && go get -u -d k8s.io/code-generator/cmd/defaulter-gen && go get -u -d k8s.io/code-generator/cmd/openapi-gen && cd /go/src/k8s.io/code-generator/cmd/deepcopy-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/defaulter-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/openapi-gen && git checkout release-1.9 && go install && go get -u -d github.com/golang/protobuf/protoc-gen-go && cd /go/src/github.com/golang/protobuf/protoc-gen-go && git checkout 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 && go install ---> Using cache ---> a36d9a9a5e01
Step 10/12 : RUN pip install j2cli ---> Using cache ---> 87206820d410
Step 11/12 : ADD entrypoint.sh /entrypoint.sh ---> Using cache ---> b3a27a114600
Step 12/12 : ENTRYPOINT /entrypoint.sh ---> Using cache ---> ceba12cbc33e
Successfully built ceba12cbc33e
go version go1.10 linux/amd64
go version go1.10 linux/amd64
make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt'
hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh
Sending build context to Docker daemon 5.632 kB
Step 1/12 : FROM fedora:28 ---> cc510acfcd70
Step 2/12 : ENV LIBVIRT_VERSION 4.2.0 ---> Using cache ---> 2c20d91d48e6
Step 3/12 : RUN curl --output /etc/yum.repos.d/fedora-virt-preview.repo https://fedorapeople.org/groups/virt/virt-preview/fedora-virt-preview.repo ---> Using cache ---> b7c63cc1ff31
Step 4/12 : RUN dnf -y install libvirt-devel-${LIBVIRT_VERSION} make git mercurial sudo gcc findutils gradle rsync-daemon rsync qemu-img protobuf-compiler && dnf -y clean all ---> Using cache ---> 784786580231
Step 5/12 : ENV GIMME_GO_VERSION 1.10 ---> Using cache ---> e6c3df647782
Step 6/12 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> aa4d2ed226ed
Step 7/12 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 9d8471eeeed9
Step 8/12 : ADD rsyncd.conf /etc/rsyncd.conf ---> Using cache ---> 35522f6166d0
Step 9/12 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/mattn/goveralls && go get -u github.com/Masterminds/glide && go get golang.org/x/tools/cmd/goimports && git clone https://github.com/mvdan/sh.git $GOPATH/src/mvdan.cc/sh && cd /go/src/mvdan.cc/sh/cmd/shfmt && git checkout v2.5.0 && go get mvdan.cc/sh/cmd/shfmt && go install && go get -u github.com/golang/mock/gomock && go get -u github.com/rmohr/mock/mockgen && go get -u github.com/rmohr/go-swagger-utils/swagger-doc && go get -u github.com/onsi/ginkgo/ginkgo && go get -u -d k8s.io/code-generator/cmd/deepcopy-gen && go get -u -d k8s.io/code-generator/cmd/defaulter-gen && go get -u -d k8s.io/code-generator/cmd/openapi-gen && cd /go/src/k8s.io/code-generator/cmd/deepcopy-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/defaulter-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/openapi-gen && git checkout release-1.9 && go install && go get -u -d github.com/golang/protobuf/protoc-gen-go && cd /go/src/github.com/golang/protobuf/protoc-gen-go && git checkout 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 && go install ---> Using cache ---> a36d9a9a5e01
Step 10/12 : RUN pip install j2cli ---> Using cache ---> 87206820d410
Step 11/12 : ADD entrypoint.sh /entrypoint.sh ---> Using cache ---> b3a27a114600
Step 12/12 : ENTRYPOINT /entrypoint.sh ---> Using cache ---> ceba12cbc33e
Successfully built ceba12cbc33e
go version go1.10 linux/amd64
go version go1.10 linux/amd64
find: '/root/go/src/kubevirt.io/kubevirt/_out/cmd': No such file or directory
Compiling tests...
compiled tests.test
hack/build-docker.sh build
Sending build context to Docker daemon 40.37 MB
Step 1/8 : FROM fedora:28 ---> cc510acfcd70
Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40
Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> b4f3251c6468
Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> 813752072d9d
Step 5/8 : USER 1001 ---> Using cache ---> 88b3556f36b4
Step 6/8 : COPY virt-controller /usr/bin/virt-controller ---> 4961d79db8f3 Removing intermediate container 44a9a58c28bc
Step 7/8 : ENTRYPOINT /usr/bin/virt-controller ---> Running in 697bc1af86b8 ---> 9631897f91bb Removing intermediate container 697bc1af86b8
Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-controller" '' ---> Running in 1fc3b8270e42 ---> 48f5f353e481 Removing intermediate container 1fc3b8270e42
Successfully built 48f5f353e481
Sending build context to Docker daemon 43.31 MB
Step 1/10 : FROM kubevirt/libvirt:4.2.0 ---> 5f0bfe81a3e0
Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 09010a005182
Step 3/10 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool net-tools sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> fc9481693838
Step 4/10 : COPY virt-launcher /usr/bin/virt-launcher ---> 58a37f446e22 Removing intermediate container 8a02cc5a9d6e
Step 5/10 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> 4b69a2f73225 Removing intermediate container 2f370b4c43fb
Step 6/10 : RUN setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64 ---> Running in c7067467ad0e ---> 6fe35f86c89b Removing intermediate container c7067467ad0e
Step 7/10 : RUN mkdir -p /usr/share/kubevirt/virt-launcher ---> Running in ada1b0189e24 ---> 8a381439c324 Removing intermediate container ada1b0189e24
Step 8/10 : COPY entrypoint.sh libvirtd.sh sock-connector /usr/share/kubevirt/virt-launcher/ ---> de2c042c2c18 Removing intermediate container ff2311355898
Step 9/10 : ENTRYPOINT /usr/share/kubevirt/virt-launcher/entrypoint.sh ---> Running in ceec6d22d2eb ---> b01e337d6190 Removing intermediate container ceec6d22d2eb
Step 10/10 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-launcher" '' ---> Running in a8dc5bd52192 ---> 5c8bc99ed796 Removing intermediate container a8dc5bd52192
Successfully built 5c8bc99ed796
Sending build context to Docker daemon 41.69 MB
Step 1/5 : FROM fedora:28 ---> cc510acfcd70
Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40
Step 3/5 : COPY virt-handler /usr/bin/virt-handler ---> daed2e097513 Removing intermediate container def5053d3447
Step 4/5 : ENTRYPOINT /usr/bin/virt-handler ---> Running in 98a4500c0fca ---> 36212b031f6f Removing intermediate container 98a4500c0fca
Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-handler" '' ---> Running in ffbf3247d181 ---> 4bf6bb616ede Removing intermediate container ffbf3247d181
Successfully built 4bf6bb616ede
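Note: Step 6/10 of the virt-launcher image above grants qemu the one capability it needs to bind privileged ports without running as root; the effect can be verified with getcap:
  setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64
  getcap /usr/bin/qemu-system-x86_64   # -> /usr/bin/qemu-system-x86_64 = cap_net_bind_service+eip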
Sending build context to Docker daemon 38.84 MB
Step 1/8 : FROM fedora:28 ---> cc510acfcd70
Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40
Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api ---> Using cache ---> 3cff23506e80
Step 4/8 : WORKDIR /home/virt-api ---> Using cache ---> e94c5606b96b
Step 5/8 : USER 1001 ---> Using cache ---> af16317199f5
Step 6/8 : COPY virt-api /usr/bin/virt-api ---> 8d5a1d453fed Removing intermediate container 5f3cb82a216f
Step 7/8 : ENTRYPOINT /usr/bin/virt-api ---> Running in efe4b72be4e6 ---> 9e47903dff22 Removing intermediate container efe4b72be4e6
Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-api" '' ---> Running in 3e13821565c6 ---> 0f7d1675a8c9 Removing intermediate container 3e13821565c6
Successfully built 0f7d1675a8c9
Sending build context to Docker daemon 4.096 kB
Step 1/7 : FROM fedora:28 ---> cc510acfcd70
Step 2/7 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40
Step 3/7 : ENV container docker ---> Using cache ---> aed3ca4ac3a3
Step 4/7 : RUN mkdir -p /images/custom /images/alpine && truncate -s 64M /images/custom/disk.img && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/alpine/disk.img ---> Using cache ---> c7d0cf8fc982
Step 5/7 : ADD entrypoint.sh / ---> Using cache ---> 0393e5ee0c37
Step 6/7 : CMD /entrypoint.sh ---> Using cache ---> 23798f49dea3
Step 7/7 : LABEL "disks-images-provider" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Using cache ---> 94ce40445be4
Successfully built 94ce40445be4
Sending build context to Docker daemon 2.56 kB
Step 1/5 : FROM fedora:28 ---> cc510acfcd70
Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40
Step 3/5 : ENV container docker ---> Using cache ---> aed3ca4ac3a3
Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> d8c990eaf575
Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "vm-killer" '' ---> Using cache ---> 959b46fa16f1
Successfully built 959b46fa16f1
Sending build context to Docker daemon 5.12 kB
Step 1/7 : FROM debian:sid ---> 68f33cf86aab
Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> 50fc79ebe51c
Step 3/7 : ENV container docker ---> Using cache ---> b8e063496923
Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> 8adb1572b35c
Step 5/7 : ADD entry-point.sh / ---> Using cache ---> 8c0c5a52e4df
Step 6/7 : CMD /entry-point.sh ---> Using cache ---> 1a4b838e5dee
Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "registry-disk-v1alpha" '' ---> Using cache ---> 6cd99eac1b26
Successfully built 6cd99eac1b26
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:33392/kubevirt/registry-disk-v1alpha:devel ---> 6cd99eac1b26
Step 2/4 : MAINTAINER "David Vossel" \ ---> Using cache ---> daf02fbc6053
Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Using cache ---> 7dad63217a8a
Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Using cache ---> 4040e403422c
Successfully built 4040e403422c
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:33392/kubevirt/registry-disk-v1alpha:devel ---> 6cd99eac1b26
Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 2b3f11794cd2
Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2 ---> Using cache ---> 42ceecc2fbea
Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Using cache ---> 5f0ffa61a701
Successfully built 5f0ffa61a701
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:33392/kubevirt/registry-disk-v1alpha:devel ---> 6cd99eac1b26
Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 2b3f11794cd2
Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso ---> Using cache ---> 48562b170460
Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Using cache ---> 414ca08aaecd
Successfully built 414ca08aaecd
Sending build context to Docker daemon 35.59 MB
Step 1/8 : FROM fedora:28 ---> cc510acfcd70
Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40
Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl ---> Using cache ---> d74088d7a4fc
Step 4/8 : WORKDIR /home/virtctl ---> Using cache ---> c8c857bf8d96
Step 5/8 : USER 1001 ---> Using cache ---> 36730a67b946
Step 6/8 : COPY subresource-access-test /subresource-access-test ---> 2941c653b3d1 Removing intermediate container 0c2e93ffafb6
Step 7/8 : ENTRYPOINT /subresource-access-test ---> Running in 97b6e4a4ee61 ---> 7c83a532af7a Removing intermediate container 97b6e4a4ee61
Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "subresource-access-test" '' ---> Running in a6ef1b8ae237 ---> ec88a19eb17e Removing intermediate container a6ef1b8ae237
Successfully built ec88a19eb17e
Sending build context to Docker daemon 3.072 kB
Step 1/9 : FROM fedora:28 ---> cc510acfcd70
Step 2/9 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 84920e004a40
Step 3/9 : ENV container docker ---> Using cache ---> aed3ca4ac3a3
Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all ---> Using cache ---> 6050b24a5d85
Step 5/9 : ENV GIMME_GO_VERSION 1.9.2 ---> Using cache ---> 0447d2178073
Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> 291db82d955f
Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 793556477837
Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli ---> Using cache ---> fd5c6e1f9461
Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "winrmcli" '' ---> Using cache ---> a223dce236ce
Successfully built a223dce236ce
Sending build context to Docker daemon 36.8 MB
Step 1/5 : FROM fedora:27 ---> 9110ae7f579f
Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 71a8c548e503
Step 3/5 : COPY example-hook-sidecar /example-hook-sidecar ---> 68a64f9a0790 Removing intermediate container 7cc4d38c663e
Step 4/5 : ENTRYPOINT /example-hook-sidecar ---> Running in 41d71d9d3c14 ---> d5e1ed0c05ff Removing intermediate container 41d71d9d3c14
Step 5/5 : LABEL "example-hook-sidecar" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Running in 410222c59249 ---> 770f409df97f Removing intermediate container 410222c59249
Successfully built 770f409df97f
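Note: every image above carries the job-specific LABEL "kubevirt-functional-tests-openshift-3.10-release0", which makes images from this run discoverable later (a sketch of how such a label can be queried):
  docker images --filter 'label=kubevirt-functional-tests-openshift-3.10-release0' -q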
hack/build-docker.sh push
The push refers to a repository [localhost:33392/kubevirt/virt-controller]
23791084fe4f: Preparing b2f5abdac324: Preparing 891e1e4ef82a: Preparing b2f5abdac324: Pushed 23791084fe4f: Pushed 891e1e4ef82a: Pushed
devel: digest: sha256:920d6859d399ad1f0d545b9c0de71399704a365768431541d77f276431c5055d size: 949
The push refers to a repository [localhost:33392/kubevirt/virt-launcher]
3b654d671de0: Preparing bad76be30dc3: Preparing 0a72ea7cd256: Preparing 6d4a5a894e83: Preparing a2846a3adb8d: Preparing 0b99c4111657: Preparing da38cf808aa5: Preparing b83399358a92: Preparing 186d8b3e4fd8: Preparing fa6154170bf5: Preparing 5eefb9960a36: Preparing 891e1e4ef82a: Preparing 0b99c4111657: Waiting da38cf808aa5: Waiting b83399358a92: Waiting 186d8b3e4fd8: Waiting 5eefb9960a36: Waiting bad76be30dc3: Pushed 6d4a5a894e83: Pushed 3b654d671de0: Pushed da38cf808aa5: Pushed b83399358a92: Pushed fa6154170bf5: Pushed 0a72ea7cd256: Pushed 891e1e4ef82a: Mounted from kubevirt/virt-controller 186d8b3e4fd8: Pushed a2846a3adb8d: Pushed 0b99c4111657: Pushed 5eefb9960a36: Pushed
devel: digest: sha256:ef82bbe5f11a47a422f41be9bd69799af3fc134d9f15f023abb7baf92446a347 size: 2828
The push refers to a repository [localhost:33392/kubevirt/virt-handler]
b13c72093fea: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-launcher b13c72093fea: Pushed
devel: digest: sha256:ae578598d65d2e8275fce8ed1e4d0e2bcc4f1399f44e91f6040f99caafe84418 size: 741
The push refers to a repository [localhost:33392/kubevirt/virt-api]
2afede8469ea: Preparing afd1d781e4d1: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-handler afd1d781e4d1: Pushed 2afede8469ea: Pushed
devel: digest: sha256:65991e3ea6d7d2ae85848ccf3bcba6d47f7de9daf7de180290c2825b9e58675b size: 948
The push refers to a repository [localhost:33392/kubevirt/disks-images-provider]
dc0875c44573: Preparing 8fc77a44094f: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-api dc0875c44573: Pushed 8fc77a44094f: Pushed
devel: digest: sha256:6983cf788a7b820c1e35fcae3ab69f0c6bf8246bd9b125048ab46c757eff9e58 size: 948
The push refers to a repository [localhost:33392/kubevirt/vm-killer]
d1b69e768421: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/disks-images-provider d1b69e768421: Pushed
devel: digest: sha256:a51ee0305cb1255dac598ac872707bce340d95ba5394dc3fac22ce1ec3178090 size: 740
The push refers to a repository [localhost:33392/kubevirt/registry-disk-v1alpha]
2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 2a15632f54d4: Pushed 91a924e03d7c: Pushed 25edbec0eaea: Pushed
devel: digest: sha256:92625a01ad38ccbd20daaef348bee72b98b522337c3fe8d9d464164226ef71d6 size: 948
The push refers to a repository [localhost:33392/kubevirt/cirros-registry-disk-demo]
8679079ce47d: Preparing 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 91a924e03d7c: Mounted from kubevirt/registry-disk-v1alpha 25edbec0eaea: Mounted from kubevirt/registry-disk-v1alpha 2a15632f54d4: Mounted from kubevirt/registry-disk-v1alpha 8679079ce47d: Pushed
devel: digest: sha256:61bd0de23218ace144f2feed65e671e3ca047a9699f1bea4f5ad6f0e600591c6 size: 1160
The push refers to a repository [localhost:33392/kubevirt/fedora-cloud-registry-disk-demo]
6aaf09f3f0fe: Preparing 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 25edbec0eaea: Mounted from kubevirt/cirros-registry-disk-demo 91a924e03d7c: Mounted from kubevirt/cirros-registry-disk-demo 2a15632f54d4: Mounted from kubevirt/cirros-registry-disk-demo 6aaf09f3f0fe: Pushed
devel: digest: sha256:28087ba777a949173f9c960afae98fce1ff6454b81d6bf707360697f8c025290 size: 1161
The push refers to a repository [localhost:33392/kubevirt/alpine-registry-disk-demo]
5013b58347dc: Preparing 2a15632f54d4: Preparing 91a924e03d7c: Preparing 25edbec0eaea: Preparing 2a15632f54d4: Mounted from kubevirt/fedora-cloud-registry-disk-demo 25edbec0eaea: Mounted from kubevirt/fedora-cloud-registry-disk-demo 91a924e03d7c: Mounted from kubevirt/fedora-cloud-registry-disk-demo 5013b58347dc: Pushed
devel: digest: sha256:b169183e203b28522665168fa28ee18fa3e8a957d8590ec8d01087333c0c2c08 size: 1160
The push refers to a repository [localhost:33392/kubevirt/subresource-access-test]
ccbe034997ec: Preparing 4052ce9d0aff: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/vm-killer 4052ce9d0aff: Pushed ccbe034997ec: Pushed
devel: digest: sha256:3467592827f70e67fa40073ddff2b23074df9b84713d347bf3b2433ff9478dcf size: 948
The push refers to a repository [localhost:33392/kubevirt/winrmcli]
64ccc7ac4271: Preparing 4242962b50c3: Preparing 0e374d8c733e: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/subresource-access-test 64ccc7ac4271: Pushed 0e374d8c733e: Pushed 4242962b50c3: Pushed
devel: digest: sha256:ac4caed4b55aa84f3428119d18dc8366d2b793673fba9149ee91174454708ac1 size: 1165
The push refers to a repository [localhost:33392/kubevirt/example-hook-sidecar]
8ac31b8342ef: Preparing 39bae602f753: Preparing 8ac31b8342ef: Pushed 39bae602f753: Pushed
devel: digest: sha256:c4ff8cb99431df91f260117bbd9deef5b2ce242a3625e64d7f5e4bfc4e079e34 size: 740
make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt'
Done
./cluster/clean.sh
+ source hack/common.sh
++++ dirname 'hack/common.sh[0]'
+++ cd hack/../
+++ pwd
++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt
++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out
++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor
++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd
++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests
++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs
++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests
++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests
++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python
++ KUBEVIRT_PROVIDER=os-3.10.0
++ KUBEVIRT_NUM_NODES=2
++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']'
++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release0
++ job_prefix=kubevirt-functional-tests-openshift-3.10-release0
+++ kubevirt_version
+++ '[' -n '' ']'
+++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']'
++++ git describe --always --tags
+++ echo v0.7.0-191-g7d32629
++ KUBEVIRT_VERSION=v0.7.0-191-g7d32629
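Note: when no explicit version is set, KUBEVIRT_VERSION is derived from git metadata, as the trace above shows:
  git describe --always --tags   # -> v0.7.0-191-g7d32629: last tag, commits since it, short hash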
+ source cluster/os-3.10.0/provider.sh
++ set -e
++ image=os-3.10.0@sha256:14ffc4a28e24a2510c9b455b56f35f6193a00b71c9150705f6afec41b003fc76
++ source cluster/ephemeral-provider-common.sh
+++ set -e
+++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a'
+ source hack/config.sh
++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace image_pull_policy
++ KUBEVIRT_PROVIDER=os-3.10.0
++ source hack/config-default.sh source hack/config-os-3.10.0.sh
+++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar'
+++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar'
+++ docker_prefix=kubevirt
+++ docker_tag=latest
+++ master_ip=192.168.200.2
+++ network_provider=flannel
+++ namespace=kube-system
+++ image_pull_policy=IfNotPresent
++ test -f hack/config-provider-os-3.10.0.sh
++ source hack/config-provider-os-3.10.0.sh
+++ master_ip=127.0.0.1
+++ docker_tag=devel
+++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig
+++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl
+++ docker_prefix=localhost:33392/kubevirt
+++ manifest_docker_prefix=registry:5000/kubevirt
++ test -f hack/config-local.sh
++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace image_pull_policy
+ echo 'Cleaning up ...'
Cleaning up ...
+ cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers
+ grep foregroundDeleteVirtualMachine
+ read p
error: the server doesn't have a resource type "vmis"
+ _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0
No resources found
+ _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0
No resources found
+ _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0
No resources found
+ namespaces=(default ${namespace})
+ for i in '${namespaces[@]}'
+ _kubectl -n default delete apiservices -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io
No resources found
+ _kubectl -n default delete deployment -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete deployment -l kubevirt.io
No resources found
+ _kubectl -n default delete rs -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete rs -l kubevirt.io
No resources found
+ _kubectl -n default delete services -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete services -l kubevirt.io
No resources found
+ _kubectl -n default delete apiservices -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io
No resources found
+ _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io
No resources found
+ _kubectl -n default delete secrets -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete secrets -l kubevirt.io
No resources found
+ _kubectl -n default delete pv -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete pv -l kubevirt.io
No resources found
+ _kubectl -n default delete pvc -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete pvc -l kubevirt.io
No resources found
+ _kubectl -n default delete ds -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete ds -l kubevirt.io
No resources found
+ _kubectl -n default delete customresourcedefinitions -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l kubevirt.io
No resources found
+ _kubectl -n default delete pods -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete pods -l kubevirt.io
No resources found
+ _kubectl -n default delete clusterrolebinding -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l kubevirt.io
No resources found
+ _kubectl -n default delete rolebinding -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete rolebinding -l kubevirt.io
No resources found
+ _kubectl -n default delete roles -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete roles -l kubevirt.io
No resources found
+ _kubectl -n default delete clusterroles -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete clusterroles -l kubevirt.io
No resources found
+ _kubectl -n default delete serviceaccounts -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l kubevirt.io
No resources found
++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io
++ wc -l
++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
++ cluster/os-3.10.0/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found
+ '[' 0 -gt 0 ']'
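Note: the teardown above (and the kube-system pass that follows) is one long unrolled loop over resource kinds labelled kubevirt.io; condensed, it is equivalent to (a sketch mirroring the trace, using the script's own _kubectl wrapper):
  for ns in default kube-system; do
      for kind in apiservices deployment rs services validatingwebhookconfiguration secrets \
                  pv pvc ds customresourcedefinitions pods clusterrolebinding rolebinding \
                  roles clusterroles serviceaccounts; do
          _kubectl -n "${ns}" delete "${kind}" -l kubevirt.io
      done
  done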
KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete services -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n kube-system delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete secrets -l kubevirt.io No resources found + _kubectl -n kube-system delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pv -l kubevirt.io No resources found + _kubectl -n kube-system delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pvc -l kubevirt.io No resources found + _kubectl -n kube-system delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete ds -l kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n kube-system delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pods -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete roles -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system 
delete clusterroles -l kubevirt.io No resources found + _kubectl -n kube-system delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + sleep 2 + echo Done Done ./cluster/deploy.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release0 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release0 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-191-g7d32629 ++ KUBEVIRT_VERSION=v0.7.0-191-g7d32629 + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:14ffc4a28e24a2510c9b455b56f35f6193a00b71c9150705f6afec41b003fc76 ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace image_pull_policy ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha 
+ source cluster/os-3.10.0/provider.sh
++ set -e
++ image=os-3.10.0@sha256:14ffc4a28e24a2510c9b455b56f35f6193a00b71c9150705f6afec41b003fc76
++ source cluster/ephemeral-provider-common.sh
+++ set -e
+++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a'
+ source hack/config.sh
++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace image_pull_policy
++ KUBEVIRT_PROVIDER=os-3.10.0
++ source hack/config-default.sh source hack/config-os-3.10.0.sh
+++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar'
+++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar'
+++ docker_prefix=kubevirt
+++ docker_tag=latest
+++ master_ip=192.168.200.2
+++ network_provider=flannel
+++ namespace=kube-system
+++ image_pull_policy=IfNotPresent
++ test -f hack/config-provider-os-3.10.0.sh
++ source hack/config-provider-os-3.10.0.sh
+++ master_ip=127.0.0.1
+++ docker_tag=devel
+++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig
+++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl
+++ docker_prefix=localhost:33392/kubevirt
+++ manifest_docker_prefix=registry:5000/kubevirt
++ test -f hack/config-local.sh
++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace image_pull_policy
+ echo 'Deploying ...'
Deploying ...
+ [[ -z openshift-3.10-release ]]
+ [[ openshift-3.10-release =~ .*-dev ]]
+ [[ openshift-3.10-release =~ .*-release ]]
+ for manifest in '${MANIFESTS_OUT_DIR}/release/*'
+ [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]]
+ continue
+ for manifest in '${MANIFESTS_OUT_DIR}/release/*'
+ [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]]
+ _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml
clusterrole.rbac.authorization.k8s.io "kubevirt.io:admin" created
clusterrole.rbac.authorization.k8s.io "kubevirt.io:edit" created
clusterrole.rbac.authorization.k8s.io "kubevirt.io:view" created
serviceaccount "kubevirt-apiserver" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver-auth-delegator" created
rolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created
role.rbac.authorization.k8s.io "kubevirt-apiserver" created
clusterrole.rbac.authorization.k8s.io "kubevirt-apiserver" created
clusterrole.rbac.authorization.k8s.io "kubevirt-controller" created
serviceaccount "kubevirt-controller" created
serviceaccount "kubevirt-privileged" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller-cluster-admin" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-privileged-cluster-admin" created
clusterrole.rbac.authorization.k8s.io "kubevirt.io:default" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt.io:default" created
service "virt-api" created
deployment.extensions "virt-api" created
deployment.extensions "virt-controller" created
daemonset.extensions "virt-handler" created
customresourcedefinition.apiextensions.k8s.io "virtualmachineinstances.kubevirt.io" created
customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancereplicasets.kubevirt.io" created
customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancepresets.kubevirt.io" created
customresourcedefinition.apiextensions.k8s.io "virtualmachines.kubevirt.io" created
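Editor's note: the deploy loop traced above applies every manifest under the release directory except demo content. A sketch of that loop, matching the trace:

# apply all release manifests, skipping anything matching *demo*
for manifest in "${MANIFESTS_OUT_DIR}"/release/*; do
    [[ "$manifest" =~ .*demo.* ]] && continue   # demo-content.yaml is skipped in CI
    cluster/os-3.10.0/.kubectl create -f "$manifest"
done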
+ _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R
persistentvolumeclaim "disk-alpine" created
persistentvolume "host-path-disk-alpine" created
persistentvolumeclaim "disk-custom" created
persistentvolume "host-path-disk-custom" created
daemonset.extensions "disks-images-provider" created
serviceaccount "kubevirt-testing" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-testing-cluster-admin" created
+ [[ os-3.10.0 =~ os-* ]]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"]
+ _kubectl adm policy add-scc-to-user privileged admin
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged admin
scc "privileged" added to: ["admin"]
+ echo Done
Done
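Editor's note: on OpenShift providers, each KubeVirt service account needs the privileged SCC, which is what the add-scc-to-user calls above grant. The same grants as standalone oc commands (-z resolves the name as a service account in the target namespace):

oc adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system
oc adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system
oc adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system
oc adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system
oc adm policy add-scc-to-user privileged admin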
+ namespaces=(kube-system default)
+ [[ kube-system != \k\u\b\e\-\s\y\s\t\e\m ]]
+ timeout=300
+ sample=30
+ for i in '${namespaces[@]}'
+ current_time=0
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
++ grep -v Running
+ '[' -n 'disks-images-provider-gmh8f 0/1 ContainerCreating 0 2s
disks-images-provider-lwz77 0/1 ContainerCreating 0 2s
virt-api-7d79764579-2xzv4 0/1 ContainerCreating 0 4s
virt-api-7d79764579-7ghms 0/1 ContainerCreating 0 4s
virt-controller-7d57d96b65-49lbl 0/1 ContainerCreating 0 4s
virt-controller-7d57d96b65-zwjhl 0/1 ContainerCreating 0 4s
virt-handler-82vhp 0/1 ContainerCreating 0 4s
virt-handler-lx6l5 0/1 ContainerCreating 0 4s' ']'
+ echo 'Waiting for kubevirt pods to enter the Running state ...'
Waiting for kubevirt pods to enter the Running state ...
+ kubectl get pods -n kube-system --no-headers
+ cluster/kubectl.sh get pods -n kube-system --no-headers
+ grep -v Running
disks-images-provider-gmh8f        0/1       ContainerCreating   0         2s
disks-images-provider-lwz77        0/1       ContainerCreating   0         2s
virt-api-7d79764579-2xzv4          0/1       ContainerCreating   0         4s
virt-api-7d79764579-7ghms          0/1       ContainerCreating   0         4s
virt-controller-7d57d96b65-49lbl   0/1       ContainerCreating   0         4s
virt-controller-7d57d96b65-zwjhl   0/1       ContainerCreating   0         4s
virt-handler-82vhp                 0/1       ContainerCreating   0         4s
virt-handler-lx6l5                 0/1       ContainerCreating   0         4s
+ sleep 30
+ current_time=30
+ '[' 30 -gt 300 ']'
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
++ grep -v Running
+ '[' -n '' ']'
+ current_time=0
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
+ '[' -n false ']'
+ echo 'Waiting for KubeVirt containers to become ready ...'
Waiting for KubeVirt containers to become ready ...
+ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ grep false
+ true
+ sleep 30
+ current_time=30
+ '[' 30 -gt 300 ']'
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ '[' -n '' ']'
+ kubectl get pods -n kube-system
+ cluster/kubectl.sh get pods -n kube-system
NAME                               READY     STATUS    RESTARTS   AGE
disks-images-provider-gmh8f        1/1       Running   0          1m
disks-images-provider-lwz77        1/1       Running   0          1m
master-api-node01                  1/1       Running   1          4d
master-controllers-node01          1/1       Running   2          4d
master-etcd-node01                 1/1       Running   1          4d
virt-api-7d79764579-2xzv4          1/1       Running   0          1m
virt-api-7d79764579-7ghms          1/1       Running   0          1m
virt-controller-7d57d96b65-49lbl   1/1       Running   0          1m
virt-controller-7d57d96b65-zwjhl   1/1       Running   0          1m
virt-handler-82vhp                 1/1       Running   0          1m
virt-handler-lx6l5                 1/1       Running   0          1m
+ for i in '${namespaces[@]}'
+ current_time=0
++ kubectl get pods -n default --no-headers
++ grep -v Running
++ cluster/kubectl.sh get pods -n default --no-headers
+ '[' -n '' ']'
+ current_time=0
++ kubectl get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
++ cluster/kubectl.sh get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ '[' -n '' ']'
+ kubectl get pods -n default
+ cluster/kubectl.sh get pods -n default
NAME                             READY     STATUS    RESTARTS   AGE
docker-registry-1-92ssr          1/1       Running   1          4d
local-volume-provisioner-fmrc2   1/1       Running   0          8m
local-volume-provisioner-jng2h   1/1       Running   0          8m
registry-console-1-k87ct         1/1       Running   1          4d
router-1-49jwl                   1/1       Running   1          4d
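Editor's note: the polling above applies two readiness gates per namespace. A condensed sketch of the same logic (the real script inlines these loops; the helper name here is hypothetical):

timeout=300
sample=30
wait_until_empty() {
    # loop until the given pipeline produces no output, or the timeout hits
    local waited=0
    while [ -n "$(eval "$1")" ]; do
        sleep "$sample"
        waited=$((waited + sample))
        [ "$waited" -gt "$timeout" ] && { echo "timed out: $1" >&2; return 1; }
    done
}
# gate 1: no pod outside the Running phase
wait_until_empty "cluster/kubectl.sh get pods -n kube-system --no-headers | grep -v Running"
# gate 2: no container reporting ready=false
wait_until_empty "cluster/kubectl.sh get pods -n kube-system -ocustom-columns='status:status.containerStatuses[*].ready' --no-headers | grep false"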
+ kubectl version
+ cluster/kubectl.sh version
oc v3.10.0-rc.0+c20e215
kubernetes v1.10.0+b81c8f8
features: Basic-Auth GSSAPI Kerberos SPNEGO
Server https://127.0.0.1:33389
openshift v3.10.0-rc.0+c20e215
kubernetes v1.10.0+b81c8f8
+ ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml'
+ [[ openshift-3.10-release =~ windows.* ]]
+ FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml'
+ make functest
hack/dockerized "hack/build-func-tests.sh"
Sending build context to Docker daemon 5.632 kB
Step 1/12 : FROM fedora:28
 ---> cc510acfcd70
Step 2/12 : ENV LIBVIRT_VERSION 4.2.0
 ---> Using cache
 ---> 2c20d91d48e6
Step 3/12 : RUN curl --output /etc/yum.repos.d/fedora-virt-preview.repo https://fedorapeople.org/groups/virt/virt-preview/fedora-virt-preview.repo
 ---> Using cache
 ---> b7c63cc1ff31
Step 4/12 : RUN dnf -y install libvirt-devel-${LIBVIRT_VERSION} make git mercurial sudo gcc findutils gradle rsync-daemon rsync qemu-img protobuf-compiler && dnf -y clean all
 ---> Using cache
 ---> 784786580231
Step 5/12 : ENV GIMME_GO_VERSION 1.10
 ---> Using cache
 ---> e6c3df647782
Step 6/12 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh
 ---> Using cache
 ---> aa4d2ed226ed
Step 7/12 : ENV GOPATH "/go" GOBIN "/usr/bin"
 ---> Using cache
 ---> 9d8471eeeed9
Step 8/12 : ADD rsyncd.conf /etc/rsyncd.conf
 ---> Using cache
 ---> 35522f6166d0
Step 9/12 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/mattn/goveralls && go get -u github.com/Masterminds/glide && go get golang.org/x/tools/cmd/goimports && git clone https://github.com/mvdan/sh.git $GOPATH/src/mvdan.cc/sh && cd /go/src/mvdan.cc/sh/cmd/shfmt && git checkout v2.5.0 && go get mvdan.cc/sh/cmd/shfmt && go install && go get -u github.com/golang/mock/gomock && go get -u github.com/rmohr/mock/mockgen && go get -u github.com/rmohr/go-swagger-utils/swagger-doc && go get -u github.com/onsi/ginkgo/ginkgo && go get -u -d k8s.io/code-generator/cmd/deepcopy-gen && go get -u -d k8s.io/code-generator/cmd/defaulter-gen && go get -u -d k8s.io/code-generator/cmd/openapi-gen && cd /go/src/k8s.io/code-generator/cmd/deepcopy-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/defaulter-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/openapi-gen && git checkout release-1.9 && go install && go get -u -d github.com/golang/protobuf/protoc-gen-go && cd /go/src/github.com/golang/protobuf/protoc-gen-go && git checkout 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 && go install
 ---> Using cache
 ---> a36d9a9a5e01
Step 10/12 : RUN pip install j2cli
 ---> Using cache
 ---> 87206820d410
Step 11/12 : ADD entrypoint.sh /entrypoint.sh
 ---> Using cache
 ---> b3a27a114600
Step 12/12 : ENTRYPOINT /entrypoint.sh
 ---> Using cache
 ---> ceba12cbc33e
Successfully built ceba12cbc33e
go version go1.10 linux/amd64
go version go1.10 linux/amd64
Compiling tests...
compiled tests.test
hack/functests.sh
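Editor's note: the suite below can be re-driven by hand against a cluster that is still up from cluster-up; a minimal sketch (the junit path here is an arbitrary stand-in, not the CI workspace path):

export KUBEVIRT_PROVIDER=os-3.10.0
export FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/tmp/junit.xml'
make functest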
Running Suite: Tests Suite
==========================
Random Seed: 1533579784
Will run 151 of 151 specs

volumedisk0 compute
• [SLOW TEST:63.270 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with 3 CPU cores
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:56
      should report 3 cpu cores under guest OS
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:62
------------------------------
•
------------------------------
• [SLOW TEST:18.949 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with hugepages
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:164
      should consume hugepages
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        hugepages-2Mi
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
S [SKIPPING] [0.220 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with hugepages
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:164
      should consume hugepages
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        hugepages-1Gi [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

        No node with hugepages hugepages-1Gi capacity
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:216
------------------------------
•
------------------------------
• [SLOW TEST:97.863 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:294
    when CPU model defined
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:340
      should report defined CPU model
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:341
------------------------------
• [SLOW TEST:105.711 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:294
    when CPU model equals to passthrough
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:368
      should report exactly the same model as node CPU
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:369
------------------------------
• [SLOW TEST:93.083 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:294
    when CPU model not defined
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:392
      should report CPU model from libvirt capabilities
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:393
------------------------------
• [SLOW TEST:51.275 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  New VirtualMachineInstance with all supported drives
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:413
    should have all the device nodes
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:436
------------------------------
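Editor's note: the hugepages-1Gi entry was skipped because no node advertises 1Gi hugepage capacity. A quick check against the same cluster:

# empty or zero hugepages-1Gi capacity on every node triggers the skip above
cluster/kubectl.sh describe nodes | grep -i hugepages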
••••Service cluster-ip-vmi successfully exposed for virtualmachineinstance testvmi27vfv
------------------------------
• [SLOW TEST:54.218 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on a VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61
    Expose ClusterIP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:68
      Should expose a Cluster IP service on a VMI and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:71
------------------------------
Service cluster-ip-target-vmi successfully exposed for virtualmachineinstance testvmi27vfv
•Service node-port-vmi successfully exposed for virtualmachineinstance testvmi27vfv
------------------------------
• [SLOW TEST:9.171 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on a VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61
    Expose NodePort service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:124
      Should expose a NodePort service on a VMI and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:129
------------------------------
Service cluster-ip-udp-vmi successfully exposed for virtualmachineinstance testvmi6hjlv
• [SLOW TEST:53.873 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose UDP service on a VMI
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:166
    Expose ClusterIP UDP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:173
      Should expose a ClusterIP service on a VMI and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:177
------------------------------
Service node-port-udp-vmi successfully exposed for virtualmachineinstance testvmi6hjlv
• [SLOW TEST:9.231 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose UDP service on a VMI
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:166
    Expose NodePort UDP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:205
      Should expose a NodePort service on a VMI and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:210
------------------------------
Service cluster-ip-vmirs successfully exposed for vmirs replicasetqwfgr
• [SLOW TEST:72.980 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on a VMI replica set
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:253
    Expose ClusterIP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:286
      Should create a ClusterIP service on VMRS and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:290
------------------------------
Service cluster-ip-vm successfully exposed for virtualmachine testvmisxs4j
VM testvmisxs4j was scheduled to start
• [SLOW TEST:60.205 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on an VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:318
    Expose ClusterIP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:362
      Connect to ClusterIP services that was set when VM was offline
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:363
------------------------------
• [SLOW TEST:84.076 seconds]
RegistryDisk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41
  Starting and stopping the same VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:90
    with ephemeral registry disk
    /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:91
      should success multiple times
      /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:92
------------------------------
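Editor's note: the Expose specs above create ClusterIP and NodePort services in front of a VMI. Hand-run equivalents with virtctl (the VMI name and port numbers here are hypothetical; the suite targets instances such as testvmi27vfv):

virtctl expose virtualmachineinstance testvmi --name cluster-ip-vmi --port 27017 --target-port 22
virtctl expose virtualmachineinstance testvmi --name node-port-vmi --type NodePort --port 27017 --target-port 22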
• [SLOW TEST:16.618 seconds]
RegistryDisk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:111
    with ephemeral registry disk
    /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:112
      should not modify the spec on status update
      /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:113
------------------------------
• [SLOW TEST:21.766 seconds]
RegistryDisk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41
  Starting multiple VMIs
  /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:129
    with ephemeral registry disk
    /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:130
      should success
      /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:131
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.004 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  should succeed to start a vmi [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:133

  Skip Windows tests that requires PVC disk-windows
  /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1387
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  should succeed to stop a running vmi [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:139

  Skip Windows tests that requires PVC disk-windows
  /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1387
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.004 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with winrm connection [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150
    should have correct UUID
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:192

  Skip Windows tests that requires PVC disk-windows
  /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1387
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.004 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with winrm connection [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150
    should have pod IP
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:208

  Skip Windows tests that requires PVC disk-windows
  /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1387
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with kubectl command [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226
    should succeed to start a vmi
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:242

  Skip Windows tests that requires PVC disk-windows
  /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1387
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.002 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with kubectl command [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226
    should succeed to stop a vmi
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:250

  Skip Windows tests that requires PVC disk-windows
  /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1387
------------------------------
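Editor's note: all six Windows specs skip because no PVC named disk-windows exists in this run (the CI normally backs one with an NFS-hosted Windows image). A hypothetical claim that would satisfy the precondition; the size and access mode are assumptions:

cat <<EOF | cluster/os-3.10.0/.kubectl create -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: disk-windows
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 30Gi
EOF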
• [SLOW TEST:48.794 seconds]
CloudInit UserData
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80
    with cloudInitNoCloud userDataBase64 source
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81
      should have cloud-init data
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:82
------------------------------
• [SLOW TEST:160.059 seconds]
CloudInit UserData
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80
    with cloudInitNoCloud userDataBase64 source
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81
      with injected ssh-key
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:92
        should have ssh-key under authorized keys
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:93
------------------------------
• [SLOW TEST:58.118 seconds]
CloudInit UserData
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80
    with cloudInitNoCloud userData source
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:118
      should process provided cloud-init data
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:119
------------------------------
• [SLOW TEST:49.119 seconds]
CloudInit UserData
/root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80
    should take user-data from k8s secret
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:162
------------------------------
• [SLOW TEST:48.780 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      with a cirros image
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:67
        should return that we are running cirros
        /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:68
------------------------------
• [SLOW TEST:52.827 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      with a fedora image
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:77
        should return that we are running fedora
        /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:78
------------------------------
• [SLOW TEST:50.002 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      should be able to reconnect to console multiple times
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:87
------------------------------
• [SLOW TEST:16.080 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      should wait until the virtual machine is in running state and return a stream interface
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:103
------------------------------
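Editor's note: the CloudInit UserData specs above feed the VMI a cloudInitNoCloud volume whose user data is base64-encoded. Generating such a payload by hand (the cloud-config body below is an illustrative assumption, not the suite's exact data):

userdata=$(printf '#cloud-config\npassword: atomic\nchpasswd: { expire: False }\n' | base64 -w0)
echo "userDataBase64: ${userdata}"   # value for spec.volumes[].cloudInitNoCloud in the VMI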
• [SLOW TEST:30.224 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      should fail waiting for the virtual machine instance to be running
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:111
------------------------------
• [SLOW TEST:30.224 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      should fail waiting for the expecter
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:134
------------------------------
• [SLOW TEST:54.735 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    with Alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71
      should be successfully started
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        with Disk PVC
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:51.814 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    with Alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71
      should be successfully started
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        with CDRom PVC
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:142.627 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    with Alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71
      should be successfully started and stopped multiple times
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        with Disk PVC
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:136.857 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    with Alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71
      should be successfully started and stopped multiple times
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        with CDRom PVC
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:50.675 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With an emptyDisk defined
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:113
      should create a writeable emptyDisk with the right capacity
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:115
------------------------------
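Editor's note: the emptyDisk specs above assert that KubeVirt creates a blank scratch disk of the requested size for the VMI. The volume shape they exercise (capacity value is illustrative; the serial-number variant sets a serial on the corresponding disk entry):

cat <<'EOF'
volumes:
- name: emptydisk
  emptyDisk:
    capacity: 2Gi
EOF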
• [SLOW TEST:50.751 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With an emptyDisk defined and a specified serial number
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:163
      should create a writeable emptyDisk with the specified serial number
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:165
------------------------------
• [SLOW TEST:51.174 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With ephemeral alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205
      should be successfully started
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:207
------------------------------
Pod name: disks-images-provider-gmh8f
Pod phase: Running
copy all images to host mount directory
Pod name: disks-images-provider-lwz77
Pod phase: Running
copy all images to host mount directory
Pod name: virt-api-7d79764579-2xzv4
Pod phase: Running
2018/08/06 18:55:49 http: TLS handshake error from 10.128.0.1:50578: EOF
2018/08/06 18:55:59 http: TLS handshake error from 10.128.0.1:50626: EOF
2018/08/06 18:56:09 http: TLS handshake error from 10.128.0.1:50668: EOF
2018/08/06 18:56:19 http: TLS handshake error from 10.128.0.1:50710: EOF
level=info timestamp=2018-08-06T18:56:29.272017Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/08/06 18:56:29 http: TLS handshake error from 10.128.0.1:50756: EOF
2018/08/06 18:56:39 http: TLS handshake error from 10.128.0.1:50800: EOF
2018/08/06 18:56:49 http: TLS handshake error from 10.128.0.1:50842: EOF
level=info timestamp=2018-08-06T18:56:59.231262Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/08/06 18:56:59 http: TLS handshake error from 10.128.0.1:50888: EOF
2018/08/06 18:57:09 http: TLS handshake error from 10.128.0.1:50930: EOF
2018/08/06 18:57:19 http: TLS handshake error from 10.128.0.1:50972: EOF
level=info timestamp=2018-08-06T18:57:29.260102Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/08/06 18:57:29 http: TLS handshake error from 10.128.0.1:51020: EOF
2018/08/06 18:57:39 http: TLS handshake error from 10.128.0.1:51062: EOF
Pod name: virt-api-7d79764579-7ghms
Pod phase: Running
level=info timestamp=2018-08-06T18:56:54.878440Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-08-06T18:56:59.278689Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/08/06 18:56:59 http: TLS handshake error from 10.129.0.1:51218: EOF
level=info timestamp=2018-08-06T18:57:01.426722Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/08/06 18:57:09 http: TLS handshake error from 10.129.0.1:51224: EOF
level=info timestamp=2018-08-06T18:57:11.483740Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/08/06 18:57:19 http: TLS handshake error from 10.129.0.1:51230: EOF
level=info timestamp=2018-08-06T18:57:21.539168Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-08-06T18:57:24.520218Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-08-06T18:57:24.943568Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-08-06T18:57:29.339996Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/08/06 18:57:29 http: TLS handshake error from 10.129.0.1:51236: EOF
level=info timestamp=2018-08-06T18:57:31.597858Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/08/06 18:57:39 http: TLS handshake error from 10.129.0.1:51242: EOF
level=info timestamp=2018-08-06T18:57:41.666511Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
Pod name: virt-controller-7d57d96b65-49lbl
Pod phase: Running
level=info timestamp=2018-08-06T18:51:22.395372Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7sdht kind= uid=b69900b2-99a9-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-08-06T18:51:22.455386Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7sdht\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7sdht"
level=info timestamp=2018-08-06T18:52:19.168375Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifh226 kind= uid=d86ff991-99a9-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-08-06T18:52:19.168488Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmifh226 kind= uid=d86ff991-99a9-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-08-06T18:53:10.000590Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdr8s kind= uid=f6bc5bb2-99a9-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-08-06T18:53:10.000750Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibdr8s kind= uid=f6bc5bb2-99a9-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-08-06T18:54:00.749576Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiph7wd kind= uid=14fc2214-99aa-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-08-06T18:54:00.750019Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiph7wd kind= uid=14fc2214-99aa-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-08-06T18:54:51.922865Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihclff kind= uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-08-06T18:54:51.923058Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihclff kind= uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-08-06T18:54:51.989331Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihclff\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihclff"
level=info timestamp=2018-08-06T18:54:52.035987Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihclff\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihclff"
level=info timestamp=2018-08-06T18:56:16.736007Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihclff kind= uid=6609ab46-99aa-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-08-06T18:56:16.736117Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmihclff kind= uid=6609ab46-99aa-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-08-06T18:56:16.796374Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmihclff\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmihclff"
Pod name: virt-controller-7d57d96b65-zwjhl
Pod phase: Running
level=info timestamp=2018-08-06T18:21:47.664700Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182
Pod name: virt-handler-82vhp
Pod phase: Running
level=info timestamp=2018-08-06T18:54:00.795056Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n"
level=info timestamp=2018-08-06T18:54:00.795073Z pos=vm.go:331 component=virt-handler msg="Domain status: Shutoff, reason: Destroyed\n"
level=info timestamp=2018-08-06T18:54:00.795145Z pos=vm.go:358 component=virt-handler namespace=kubevirt-test-default name=testvmibdr8s kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object."
level=info timestamp=2018-08-06T18:54:00.795185Z pos=vm.go:410 component=virt-handler namespace=kubevirt-test-default name=testvmibdr8s kind=VirtualMachineInstance uid= msg="Processing deletion."
level=info timestamp=2018-08-06T18:54:00.796464Z pos=vm.go:585 component=virt-handler namespace=kubevirt-test-default name=testvmibdr8s kind=VirtualMachineInstance uid= msg="Signaled deletion for testvmibdr8s"
level=info timestamp=2018-08-06T18:54:00.800644Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmibdr8s kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-08-06T18:54:48.053842Z pos=vm.go:746 component=virt-handler namespace=kubevirt-test-default name=testvmibdr8s kind=Domain uid= msg="Domain deleted"
level=info timestamp=2018-08-06T18:54:48.054166Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmibdr8s, existing: false\n"
level=info timestamp=2018-08-06T18:54:48.054750Z pos=vm.go:329 component=virt-handler msg="Domain: existing: false\n"
level=info timestamp=2018-08-06T18:54:48.054821Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmibdr8s kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-08-06T18:54:48.055248Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmibdr8s kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-08-06T18:54:48.055501Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmibdr8s, existing: false\n"
level=info timestamp=2018-08-06T18:54:48.055591Z pos=vm.go:329 component=virt-handler msg="Domain: existing: false\n"
level=info timestamp=2018-08-06T18:54:48.055683Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmibdr8s kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-08-06T18:54:48.055780Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmibdr8s kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
Pod name: virt-handler-lx6l5
Pod phase: Running
level=info timestamp=2018-08-06T18:56:16.582262Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmihclff, existing: false\n"
level=info timestamp=2018-08-06T18:56:16.582350Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n"
level=info timestamp=2018-08-06T18:56:16.582378Z pos=vm.go:331 component=virt-handler msg="Domain status: Shutoff, reason: Destroyed\n"
level=info timestamp=2018-08-06T18:56:16.582508Z pos=vm.go:358 component=virt-handler namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object."
level=info timestamp=2018-08-06T18:56:16.582660Z pos=vm.go:410 component=virt-handler namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid= msg="Processing deletion."
level=info timestamp=2018-08-06T18:56:16.582766Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-08-06T18:56:30.595958Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmihclff, existing: false\n"
level=info timestamp=2018-08-06T18:56:30.597614Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n"
level=info timestamp=2018-08-06T18:56:30.597723Z pos=vm.go:331 component=virt-handler msg="Domain status: Shutoff, reason: Destroyed\n"
level=info timestamp=2018-08-06T18:56:30.597891Z pos=vm.go:358 component=virt-handler namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object."
level=info timestamp=2018-08-06T18:56:30.597956Z pos=vm.go:410 component=virt-handler namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid= msg="Processing deletion."
level=info timestamp=2018-08-06T18:56:30.598021Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-08-06T18:56:31.987164Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmihclff, existing: true\n"
level=info timestamp=2018-08-06T18:56:31.987249Z pos=vm.go:315 component=virt-handler msg="vmi is in phase: Scheduled\n"
level=info timestamp=2018-08-06T18:56:31.987279Z pos=vm.go:325 component=virt-handler namespace=kubevirt-test-default name=testvmihclff kind= uid=6609ab46-99aa-11e8-ab32-525500d15501 msg="Ignoring domain from an older VMI, will be handled by its own VMI."
Pod name: virt-launcher-testvmihclff-twg4g
Pod phase: Running
level=info timestamp=2018-08-06T18:56:20.530248Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets"
level=info timestamp=2018-08-06T18:56:20.530971Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]"
level=info timestamp=2018-08-06T18:56:20.532332Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system"
level=info timestamp=2018-08-06T18:56:30.545107Z pos=libvirt.go:276 component=virt-launcher msg="Connected to libvirt daemon"
level=info timestamp=2018-08-06T18:56:30.595357Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmihclff"
level=info timestamp=2018-08-06T18:56:30.597312Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback"
level=info timestamp=2018-08-06T18:56:30.597542Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready"
• Failure [175.470 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With ephemeral alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205
      should not persist data [It]
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:218

      Timed out after 90.165s.
      Timed out waiting for VMI to enter Running phase
      Expected
        : false
      to equal
        : true

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1078
------------------------------
STEP: Starting the VirtualMachineInstance
STEP: Starting a VirtualMachineInstance
STEP: Waiting until the VirtualMachineInstance will start
level=info timestamp=2018-08-06T18:54:53.110961Z pos=utils.go:246 component=tests namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="Created virtual machine pod virt-launcher-testvmihclff-qbc9k"
level=info timestamp=2018-08-06T18:55:08.726286Z pos=utils.go:246 component=tests namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="Pod owner ship transferred to the node virt-launcher-testvmihclff-qbc9k"
level=info timestamp=2018-08-06T18:55:09.926983Z pos=utils.go:246 component=tests namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="VirtualMachineInstance defined."
level=info timestamp=2018-08-06T18:55:09.946222Z pos=utils.go:246 component=tests namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="VirtualMachineInstance started."
STEP: Writing an arbitrary file to it's EFI partition
STEP: Killing a VirtualMachineInstance
STEP: Starting the VirtualMachineInstance again
STEP: Starting a VirtualMachineInstance
STEP: Waiting until the VirtualMachineInstance will start
level=info timestamp=2018-08-06T18:56:17.788310Z pos=utils.go:246 component=tests namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="Created virtual machine pod virt-launcher-testvmihclff-qbc9k"
level=info timestamp=2018-08-06T18:56:17.789122Z pos=utils.go:246 component=tests namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="Pod owner ship transferred to the node virt-launcher-testvmihclff-qbc9k"
level=info timestamp=2018-08-06T18:56:17.789785Z pos=utils.go:246 component=tests namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="VirtualMachineInstance defined."
level=info timestamp=2018-08-06T18:56:17.789911Z pos=utils.go:246 component=tests namespace=kubevirt-test-default name=testvmihclff kind=VirtualMachineInstance uid=337cda5c-99aa-11e8-ab32-525500d15501 msg="VirtualMachineInstance started."
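Editor's note: first-pass triage for a VMI stuck outside the Running phase, as in the failure above (object and pod names taken from this run's logs; note the dump shows a second launcher pod, virt-launcher-testvmihclff-twg4g, alongside the -qbc9k pod the test reported):

cluster/os-3.10.0/.kubectl get virtualmachineinstance testvmihclff -n kubevirt-test-default -o yaml
cluster/os-3.10.0/.kubectl get pods -n kubevirt-test-default
cluster/os-3.10.0/.kubectl logs virt-launcher-testvmihclff-twg4g -n kubevirt-test-default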
2018/08/06 14:57:48 read closing down: EOF
• [SLOW TEST:155.069 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With VirtualMachineInstance with two PVCs
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:266
      should start vmi multiple times
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:278
------------------------------
• [SLOW TEST:102.249 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be able to reach
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    the Inbound VirtualMachineInstance
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
•
------------------------------
S [SKIPPING] [0.004 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be able to reach
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    the Inbound VirtualMachineInstance with custom MAC address [It]
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

    Openshift detected: Custom MAC addresses on pod networks are not suppored
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1573
------------------------------
••••••••
------------------------------
• [SLOW TEST:55.936 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom interface model
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:368
    should expose the right device type to the guest
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:369
------------------------------
•
------------------------------
• [SLOW TEST:59.908 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom MAC address
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:402
    should configure custom MAC address
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:403
------------------------------
Pod name: disks-images-provider-gmh8f
Pod phase: Running
copy all images to host mount directory
Pod name: disks-images-provider-lwz77
Pod phase: Running
copy all images to host mount directory
Pod name: virt-api-7d79764579-2xzv4
Pod phase: Running
2018/08/06 19:05:39 http: TLS handshake error from 10.128.0.1:53398: EOF
2018/08/06 19:05:49 http: TLS handshake error from 10.128.0.1:53440: EOF
level=info timestamp=2018-08-06T19:05:59.237518Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/08/06 19:05:59 http: TLS handshake error from 10.128.0.1:53488: EOF
2018/08/06 19:06:09 http: TLS handshake error from 10.128.0.1:53532: EOF
2018/08/06 19:06:19 http: TLS handshake error from 10.128.0.1:53574: EOF
2018/08/06 19:06:29 http: TLS handshake error from 10.128.0.1:53620: EOF
2018/08/06 19:06:39 http: TLS handshake error from 10.128.0.1:53662: EOF
2018/08/06 19:06:49 http: TLS handshake error from 10.128.0.1:53704: EOF
level=info timestamp=2018-08-06T19:06:59.285196Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/08/06 19:06:59 http: TLS handshake error from 10.128.0.1:53750: EOF
2018/08/06 19:07:09 http: TLS handshake error from 10.128.0.1:53792: EOF
2018/08/06 19:07:19 http: TLS handshake error from 10.128.0.1:53834: EOF
level=info timestamp=2018-08-06T19:07:29.233600Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/08/06 19:07:29 http: TLS handshake error from 10.128.0.1:53886: EOF
Pod name: virt-api-7d79764579-7ghms
Pod phase: Running
2018/08/06 19:06:39 http: TLS handshake error from 10.129.0.1:51604: EOF
level=info timestamp=2018-08-06T19:06:44.710124Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/08/06 19:06:49 http: TLS handshake error from 10.129.0.1:51610: EOF
level=info timestamp=2018-08-06T19:06:54.764792Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-08-06T19:06:56.493273Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-08-06T19:06:56.674063Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/08/06 19:06:59 http: TLS handshake error from 10.129.0.1:51616: EOF
level=info timestamp=2018-08-06T19:07:00.533117Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-08-06T19:07:04.821350Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/08/06 19:07:09 http: TLS handshake error from 10.129.0.1:51622: EOF
level=info timestamp=2018-08-06T19:07:14.884394Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/08/06 19:07:19 http: TLS handshake error from 10.129.0.1:51628: EOF
level=info timestamp=2018-08-06T19:07:24.927074Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-08-06T19:07:26.668868Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-08-06T19:07:26.734646Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
Pod name: virt-controller-7d57d96b65-49lbl
Pod phase: Running
level=info timestamp=2018-08-06T19:00:22.362282Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi2gwst kind= uid=f8711d96-99aa-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-08-06T19:00:22.388804Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi5mxjd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi5mxjd"
component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi5mxjd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi5mxjd" level=info timestamp=2018-08-06T19:00:22.391347Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiz6shj kind= uid=f873c678-99aa-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-06T19:00:22.391420Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiz6shj kind= uid=f873c678-99aa-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:00:22.428618Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi5mxjd\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi5mxjd" level=info timestamp=2018-08-06T19:02:32.074849Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-06T19:02:32.079030Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:02:32.160399Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmibc7q2\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmibc7q2" level=info timestamp=2018-08-06T19:03:29.266117Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-06T19:03:29.266296Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:03:29.371392Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi6qg25\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi6qg25" level=info timestamp=2018-08-06T19:03:29.382246Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi6qg25\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi6qg25" level=info timestamp=2018-08-06T19:04:29.168610Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi456k8 kind= uid=8b8cef50-99ab-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance" level=info 
timestamp=2018-08-06T19:04:29.168728Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi456k8 kind= uid=8b8cef50-99ab-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:04:29.232962Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi456k8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi456k8" Pod name: virt-controller-7d57d96b65-zwjhl Pod phase: Running level=info timestamp=2018-08-06T18:21:47.664700Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-82vhp Pod phase: Running level=info timestamp=2018-08-06T19:03:45.086756Z pos=vm.go:756 component=virt-handler namespace=kubevirt-test-default name=testvmi6qg25 kind=Domain uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Domain is in state Running reason Unknown" level=info timestamp=2018-08-06T19:03:45.089186Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-08-06T19:03:45.089425Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmi6qg25, existing: true\n" level=info timestamp=2018-08-06T19:03:45.089540Z pos=vm.go:315 component=virt-handler msg="vmi is in phase: Scheduled\n" level=info timestamp=2018-08-06T19:03:45.089677Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n" level=info timestamp=2018-08-06T19:03:45.089832Z pos=vm.go:331 component=virt-handler msg="Domain status: Running, reason: Unknown\n" level=info timestamp=2018-08-06T19:03:45.090010Z pos=vm.go:419 component=virt-handler namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="No update processing required" level=info timestamp=2018-08-06T19:03:45.103824Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-08-06T19:03:45.110651Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-08-06T19:03:45.110727Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmi6qg25, existing: true\n" level=info timestamp=2018-08-06T19:03:45.110756Z pos=vm.go:315 component=virt-handler msg="vmi is in phase: Running\n" level=info timestamp=2018-08-06T19:03:45.110779Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n" level=info timestamp=2018-08-06T19:03:45.110804Z pos=vm.go:331 component=virt-handler msg="Domain status: Running, reason: Unknown\n" level=info timestamp=2018-08-06T19:03:45.110881Z pos=vm.go:416 component=virt-handler namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Processing vmi update" level=info timestamp=2018-08-06T19:03:45.121547Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Synchronization loop succeeded." 
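The repeated "Operation cannot be fulfilled ... the object has been modified" entries in the virt-controller and virt-handler logs above are ordinary optimistic-concurrency conflicts: the component re-enqueues the VMI and retries the write against the latest resourceVersion instead of treating the stale update as fatal. A minimal Go sketch of that retry shape, using client-go's conflict helper (fetchLatest and mutate are illustrative placeholders, not KubeVirt's actual functions):

package main

import (
	"fmt"

	"k8s.io/client-go/util/retry"
)

// updateVMIStatus retries the write whenever the apiserver answers with a
// Conflict, which is exactly the condition behind the re-enqueue messages
// in the controller and handler logs above.
func updateVMIStatus(fetchLatest, mutate func() error) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		if err := fetchLatest(); err != nil { // re-read at the newest resourceVersion
			return err
		}
		return mutate() // a Conflict error here triggers another attempt
	})
}

func main() {
	err := updateVMIStatus(func() error { return nil }, func() error { return nil })
	fmt.Println("status update finished:", err)
}

RetryOnConflict only retries errors that satisfy apierrors.IsConflict; anything else is returned immediately, which matches the behaviour visible above: the components keep re-enqueuing until the write lands on the current version.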
Pod name: virt-handler-lx6l5 Pod phase: Running level=info timestamp=2018-08-06T19:02:49.627146Z pos=vm.go:419 component=virt-handler namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="No update processing required" level=error timestamp=2018-08-06T19:02:49.632986Z pos=vm.go:431 component=virt-handler namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmibc7q2\": the object has been modified; please apply your changes to the latest version and try again" msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-08-06T19:02:49.633124Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmibc7q2\": the object has been modified; please apply your changes to the latest version and try again" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmibc7q2" level=info timestamp=2018-08-06T19:02:49.633529Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmibc7q2, existing: true\n" level=info timestamp=2018-08-06T19:02:49.633792Z pos=vm.go:315 component=virt-handler msg="vmi is in phase: Running\n" level=info timestamp=2018-08-06T19:02:49.633839Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n" level=info timestamp=2018-08-06T19:02:49.633964Z pos=vm.go:331 component=virt-handler msg="Domain status: Running, reason: Unknown\n" level=info timestamp=2018-08-06T19:02:49.634109Z pos=vm.go:416 component=virt-handler namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Processing vmi update" level=info timestamp=2018-08-06T19:02:49.638455Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-08-06T19:02:49.638691Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmibc7q2, existing: true\n" level=info timestamp=2018-08-06T19:02:49.638780Z pos=vm.go:315 component=virt-handler msg="vmi is in phase: Running\n" level=info timestamp=2018-08-06T19:02:49.638883Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n" level=info timestamp=2018-08-06T19:02:49.638981Z pos=vm.go:331 component=virt-handler msg="Domain status: Running, reason: Unknown\n" level=info timestamp=2018-08-06T19:02:49.639107Z pos=vm.go:416 component=virt-handler namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Processing vmi update" level=info timestamp=2018-08-06T19:02:49.642866Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Synchronization loop succeeded." Pod name: netcat2t5c9 Pod phase: Succeeded ++ head -n 1 +++ nc 10.129.0.61 1500 -i 1 -w 1 Hello World! succeeded + x='Hello World!' + echo 'Hello World!' + '[' 'Hello World!' = 'Hello World!' ']' + echo succeeded + exit 0 Pod name: netcat86xm9 Pod phase: Succeeded ++ head -n 1 +++ nc myservice.kubevirt-test-default 1500 -i 1 -w 1 + x='Hello World!' + echo 'Hello World!' Hello World! succeeded + '[' 'Hello World!' = 'Hello World!' 
']' + echo succeeded + exit 0 Pod name: netcatcmdbd Pod phase: Failed ++ head -n 1 +++ nc wrongservice.kubevirt-test-default 1500 -i 1 -w 1 Ncat: Could not resolve hostname "wrongservice.kubevirt-test-default": Name or service not known. QUITTING. + x= + echo '' + '[' '' = 'Hello World!' ']' + echo failed + exit 1 failed Pod name: netcatff7dc Pod phase: Succeeded ++ head -n 1 +++ nc my-subdomain.myvmi.kubevirt-test-default 1500 -i 1 -w 1 + x='Hello World!' + echo 'Hello World!' + '[' 'Hello World!' = 'Hello World!' ']' + echo succeeded + exit 0 Hello World! succeeded Pod name: netcathxq9c Pod phase: Succeeded ++ head -n 1 +++ nc 10.129.0.61 1500 -i 1 -w 1 + x='Hello World!' + echo 'Hello World!' + '[' 'Hello World!' = 'Hello World!' ']' + echo succeeded + exit 0 Hello World! succeeded Pod name: netcatrgj57 Pod phase: Succeeded ++ head -n 1 +++ nc 10.129.0.61 1500 -i 1 -w 1 + x='Hello World!' + echo 'Hello World!' + '[' 'Hello World!' = 'Hello World!' ']' Hello World! succeeded + echo succeeded + exit 0 Pod name: netcatrxrq5 Pod phase: Succeeded ++ head -n 1 +++ nc 10.129.0.61 1500 -i 1 -w 1 + x='Hello World!' + echo 'Hello World!' Hello World! succeeded + '[' 'Hello World!' = 'Hello World!' ']' + echo succeeded + exit 0 Pod name: virt-launcher-testvmi2gwst-nwxpt Pod phase: Running level=info timestamp=2018-08-06T19:00:40.880662Z pos=manager.go:158 component=virt-launcher namespace=kubevirt-test-default name=testvmi2gwst kind= uid=f8711d96-99aa-11e8-ab32-525500d15501 msg="Domain defined." level=info timestamp=2018-08-06T19:00:41.691354Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-08-06T19:00:41.830665Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:41.842118Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID f7140705-3263-49ae-bcee-5377e499dcc4" level=info timestamp=2018-08-06T19:00:41.842512Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-08-06T19:00:42.090715Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-08-06T19:00:42.123360Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:00:42.124951Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:42.191012Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-08-06T19:00:42.198458Z pos=manager.go:189 component=virt-launcher namespace=kubevirt-test-default name=testvmi2gwst kind= uid=f8711d96-99aa-11e8-ab32-525500d15501 msg="Domain started." 
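The netcat pods above are the test suite's connectivity probes: each dials the exposed address or service name on port 1500, reads one line, compares it with the greeting served from inside the guest, and exits 0 on a match (hence the Succeeded/Failed pod phases). The same check, sketched in Go instead of the nc one-liner; the address and expected greeting are taken from the traces above:

package main

import (
	"bufio"
	"fmt"
	"net"
	"os"
	"strings"
	"time"
)

// probe dials the target, reads a single line, and reports whether it
// matches the expected greeting, mirroring the nc | head -n 1 pattern.
func probe(addr, want string) bool {
	conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
	if err != nil {
		return false
	}
	defer conn.Close()
	conn.SetReadDeadline(time.Now().Add(5 * time.Second))
	line, _ := bufio.NewReader(conn).ReadString('\n')
	return strings.TrimSpace(line) == want
}

func main() {
	if probe("10.129.0.61:1500", "Hello World!") {
		fmt.Println("succeeded")
		return
	}
	fmt.Println("failed")
	os.Exit(1) // same exit-code convention as the netcat pods above
}

The netcatcmdbd pod is the deliberate negative case: it targets wrongservice.kubevirt-test-default, the name fails to resolve, and the pod correctly ends in the Failed phase.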
level=info timestamp=2018-08-06T19:00:42.230498Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi2gwst kind= uid=f8711d96-99aa-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:00:42.240805Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:00:42.250036Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:42.449699Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi2gwst kind= uid=f8711d96-99aa-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:00:42.847191Z pos=monitor.go:222 component=virt-launcher msg="Found PID for f7140705-3263-49ae-bcee-5377e499dcc4: 196" Pod name: virt-launcher-testvmi456k8-gtc5v Pod phase: Running level=info timestamp=2018-08-06T19:04:35.399992Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets" level=info timestamp=2018-08-06T19:04:35.400338Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]" level=info timestamp=2018-08-06T19:04:35.401765Z pos=libvirt.go:261 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system" panic: failed to connect to libvirtd: cannot connect to libvirt daemon: timed out waiting for the condition goroutine 1 [running]: main.createLibvirtConnection(0xc4200a6f60, 0x13ae140) /root/go/src/kubevirt.io/kubevirt/cmd/virt-launcher/virt-launcher.go:112 +0x125 main.main() /root/go/src/kubevirt.io/kubevirt/cmd/virt-launcher/virt-launcher.go:287 +0x4f6 virt-launcher exited with code 2 Pod name: virt-launcher-testvmi5mxjd-hpgz9 Pod phase: Running level=info timestamp=2018-08-06T19:00:39.261521Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-08-06T19:00:40.087154Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-08-06T19:00:40.113002Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:40.130318Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 6693b452-ecfd-4e3e-a530-f91d1b5adabb" level=info timestamp=2018-08-06T19:00:40.134230Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-08-06T19:00:40.642583Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-08-06T19:00:40.679649Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:00:40.686297Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:40.686650Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-08-06T19:00:40.705366Z pos=manager.go:189 component=virt-launcher namespace=kubevirt-test-default name=testvmi5mxjd kind= uid=f86cf073-99aa-11e8-ab32-525500d15501 msg="Domain started." 
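The virt-launcher-testvmi456k8 panic above is a connection timeout rather than a libvirt crash: "timed out waiting for the condition" is the stock error string of the Kubernetes wait helpers, so the launcher polled for a libvirtd connection until a deadline expired and then gave up. A sketch of that polling shape, with illustrative interval/timeout values and dial standing in for the real libvirt connect call:

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// connectToLibvirt polls dial once per second until it succeeds or the
// overall timeout expires; on expiry the wait package returns the familiar
// "timed out waiting for the condition" error seen in the panic above.
func connectToLibvirt(dial func() error) error {
	err := wait.PollImmediate(time.Second, 15*time.Second, func() (bool, error) {
		return dial() == nil, nil // a failed dial just means: poll again
	})
	if err != nil {
		return fmt.Errorf("cannot connect to libvirt daemon: %v", err)
	}
	return nil
}

func main() {
	err := connectToLibvirt(func() error { return errors.New("connection refused") })
	fmt.Println(err)
}

That dead launcher also explains the MAC-address failure reported further below: the VMI it hosted never reached the Running phase.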
level=info timestamp=2018-08-06T19:00:40.710853Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi5mxjd kind= uid=f86cf073-99aa-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:00:40.712645Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:00:40.721653Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:40.861949Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi5mxjd kind= uid=f86cf073-99aa-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:00:41.138750Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 6693b452-ecfd-4e3e-a530-f91d1b5adabb: 198" Pod name: virt-launcher-testvmi6qg25-q9mtg Pod phase: Running level=info timestamp=2018-08-06T19:03:44.123153Z pos=manager.go:158 component=virt-launcher namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Domain defined." level=info timestamp=2018-08-06T19:03:44.772409Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-08-06T19:03:44.776410Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 921e2e2f-86d6-4ad7-b19c-a02bc429f12d" level=info timestamp=2018-08-06T19:03:44.777230Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-08-06T19:03:44.783042Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:03:45.049069Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-08-06T19:03:45.081994Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:03:45.082849Z pos=manager.go:189 component=virt-launcher namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Domain started." 
level=info timestamp=2018-08-06T19:03:45.084709Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:03:45.085018Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:03:45.085222Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-08-06T19:03:45.102156Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:03:45.104359Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:03:45.114329Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi6qg25 kind= uid=67d81d25-99ab-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:03:45.781963Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 921e2e2f-86d6-4ad7-b19c-a02bc429f12d: 187" Pod name: virt-launcher-testvmibc7q2-hf74z Pod phase: Running level=info timestamp=2018-08-06T19:02:49.293165Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID b49d1c90-a3b1-47cd-b48c-ccbe6c2042cc" level=info timestamp=2018-08-06T19:02:49.294178Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-08-06T19:02:49.296186Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-08-06T19:02:49.300661Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:02:49.570361Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-08-06T19:02:49.586146Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:02:49.594537Z pos=manager.go:189 component=virt-launcher namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Domain started." 
level=info timestamp=2018-08-06T19:02:49.595787Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:02:49.598063Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:02:49.598258Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-08-06T19:02:49.620242Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:02:49.623262Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:02:49.637913Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:02:49.642520Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmibc7q2 kind= uid=45c1ba90-99ab-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:02:50.297663Z pos=monitor.go:222 component=virt-launcher msg="Found PID for b49d1c90-a3b1-47cd-b48c-ccbe6c2042cc: 193" Pod name: virt-launcher-testvmiz6shj-22c89 Pod phase: Running level=info timestamp=2018-08-06T19:00:38.859018Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-08-06T19:00:39.631687Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-08-06T19:00:39.660545Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 7d139422-34de-42bf-8b13-bebc8eec2275" level=info timestamp=2018-08-06T19:00:39.660908Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-08-06T19:00:39.669303Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:40.146234Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-08-06T19:00:40.186042Z pos=manager.go:189 component=virt-launcher namespace=kubevirt-test-default name=testvmiz6shj kind= uid=f873c678-99aa-11e8-ab32-525500d15501 msg="Domain started." 
level=info timestamp=2018-08-06T19:00:40.195818Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiz6shj kind= uid=f873c678-99aa-11e8-ab32-525500d15501 msg="Synced vmi" level=info timestamp=2018-08-06T19:00:40.196815Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:00:40.210412Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:40.210537Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-08-06T19:00:40.230629Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-08-06T19:00:40.310798Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:40.675125Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 7d139422-34de-42bf-8b13-bebc8eec2275: 187" level=info timestamp=2018-08-06T19:00:40.824943Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiz6shj kind= uid=f873c678-99aa-11e8-ab32-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmizswc4-4mh5v Pod phase: Running level=info timestamp=2018-08-06T19:00:40.571875Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:00:40.631456Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 3d544586-5681-4a6f-a9d6-1498b9f8cb59" level=info timestamp=2018-08-06T19:00:40.631924Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-08-06T19:00:41.066264Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-08-06T19:00:41.121218Z pos=manager.go:189 component=virt-launcher namespace=kubevirt-test-default name=testvmizswc4 kind= uid=f86ec332-99aa-11e8-ab32-525500d15501 msg="Domain started." 
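Each healthy virt-launcher block above follows the same sequence: define the domain, detect its UUID, start a monitoring loop ("rate 1s start timeout 5m0s"), and finally resolve the qemu process ("Found PID for <uuid>"). The liveness half of such a monitor reduces to a ticker plus a signal-0 probe, roughly as follows (Unix-only; the PID is the one from the log above, and this is a sketch of the pattern, not the launcher's actual code — the real monitor also allows up to five minutes for the process to appear):

package main

import (
	"fmt"
	"syscall"
	"time"
)

// pidAlive uses signal 0, which performs the existence and permission
// checks without actually delivering a signal to the process.
func pidAlive(pid int) bool {
	return syscall.Kill(pid, 0) == nil
}

func main() {
	pid := 183                            // qemu PID per the monitor.go entries above
	ticker := time.NewTicker(time.Second) // "Monitoring loop: rate 1s"
	defer ticker.Stop()
	for range ticker.C {
		if !pidAlive(pid) {
			fmt.Printf("process with pid %d is gone!\n", pid)
			return
		}
	}
}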
level=info timestamp=2018-08-06T19:00:41.123933Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmizswc4 kind= uid=f86ec332-99aa-11e8-ab32-525500d15501 msg="Synced vmi"
level=info timestamp=2018-08-06T19:00:41.143773Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-08-06T19:00:41.174748Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-08-06T19:00:41.175064Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-08-06T19:00:41.215455Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmizswc4 kind= uid=f86ec332-99aa-11e8-ab32-525500d15501 msg="Synced vmi"
level=info timestamp=2018-08-06T19:00:41.216478Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-08-06T19:00:41.230706Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmizswc4 kind= uid=f86ec332-99aa-11e8-ab32-525500d15501 msg="Synced vmi"
level=info timestamp=2018-08-06T19:00:41.230822Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-08-06T19:00:41.268095Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmizswc4 kind= uid=f86ec332-99aa-11e8-ab32-525500d15501 msg="Synced vmi"
level=info timestamp=2018-08-06T19:00:41.637287Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 3d544586-5681-4a6f-a9d6-1498b9f8cb59: 183"
• Failure [182.848 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
VirtualMachineInstance with custom MAC address in non-conventional format
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:414
should configure custom MAC address [It]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:415
Timed out after 90.005s.
Timed out waiting for VMI to enter Running phase
Expected
: false
to equal
: true
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:1091
------------------------------
STEP: checking eth0 MAC address
level=info timestamp=2018-08-06T19:04:30.302825Z pos=utils.go:246 component=tests namespace=kubevirt-test-default name=testvmi456k8 kind=VirtualMachineInstance uid=8b8cef50-99ab-11e8-ab32-525500d15501 msg="Created virtual machine pod virt-launcher-testvmi456k8-gtc5v"
• [SLOW TEST:66.296 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
VirtualMachineInstance with custom MAC address and slirp interface
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:427
should configure custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:428
------------------------------
• [SLOW TEST:60.888 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
VirtualMachineInstance with disabled automatic attachment of interfaces
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:440
should not configure any external interfaces
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:441
------------------------------
•
------------------------------
• [SLOW TEST:21.548 seconds]
VNC
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:46
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:54
with VNC connection
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:62
should allow accessing the VNC device
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:64
------------------------------
•••
------------------------------
• [SLOW TEST:15.163 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74
should start it
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:80
------------------------------
• [SLOW TEST:17.809 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74
should attach virt-launcher to it
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:86
------------------------------
Pod name: disks-images-provider-gmh8f
Pod phase: Running
copy all images to host mount directory
Pod name: disks-images-provider-lwz77
Pod phase: Running
copy all images to host mount directory
Pod name: virt-api-7d79764579-2xzv4
Pod phase: Running
level=info timestamp=2018-08-06T19:09:40.354416Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmixkm2m/console proto=HTTP/1.1 statusCode=200 contentLength=0
level=error timestamp=2018-08-06T19:09:41.210191Z pos=subresource.go:85 component=virt-api msg=
2018/08/06 19:09:41 http: response.WriteHeader on hijacked connection
level=error timestamp=2018-08-06T19:09:41.210422Z pos=subresource.go:97 component=virt-api reason="read tcp 10.128.0.27:8443->10.128.0.1:57934: use of closed network connection" msg="error ecountered reading from websocket stream"
level=info timestamp=2018-08-06T19:09:41.210501Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmiz6shj/console proto=HTTP/1.1 statusCode=200
contentLength=0 2018/08/06 19:09:49 http: TLS handshake error from 10.128.0.1:54546: EOF 2018/08/06 19:09:59 http: TLS handshake error from 10.128.0.1:54592: EOF level=info timestamp=2018-08-06T19:10:03.375246Z pos=subresource.go:75 component=virt-api msg="Websocket connection upgraded" level=error timestamp=2018-08-06T19:10:04.019408Z pos=subresource.go:85 component=virt-api msg="connection failed: command terminated with exit code 1" 2018/08/06 19:10:04 http: response.WriteHeader on hijacked connection level=error timestamp=2018-08-06T19:10:04.020353Z pos=subresource.go:97 component=virt-api reason="read tcp 10.128.0.27:8443->10.128.0.1:60334: use of closed network connection" msg="error ecountered reading from websocket stream" level=info timestamp=2018-08-06T19:10:04.020410Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmigmqst/console proto=HTTP/1.1 statusCode=500 contentLength=0 2018/08/06 19:10:09 http: TLS handshake error from 10.128.0.1:54654: EOF 2018/08/06 19:10:19 http: TLS handshake error from 10.128.0.1:54696: EOF 2018/08/06 19:10:29 http: TLS handshake error from 10.128.0.1:54742: EOF Pod name: virt-api-7d79764579-7ghms Pod phase: Running level=error timestamp=2018-08-06T19:10:03.730151Z pos=subresource.go:97 component=virt-api reason="read tcp 10.129.0.25:8443->10.128.0.1:60326: use of closed network connection" msg="error ecountered reading from websocket stream" level=info timestamp=2018-08-06T19:10:03.730226Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/apis/subresources.kubevirt.io/v1alpha2/namespaces/kubevirt-test-default/virtualmachineinstances/testvmigmqst/vnc proto=HTTP/1.1 statusCode=200 contentLength=0 level=info timestamp=2018-08-06T19:10:05.898573Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/06 19:10:09 http: TLS handshake error from 10.129.0.1:51744: EOF level=info timestamp=2018-08-06T19:10:13.853547Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-08-06T19:10:13.872214Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-08-06T19:10:15.962321Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/08/06 19:10:19 http: TLS handshake error from 10.129.0.1:51750: EOF level=info timestamp=2018-08-06T19:10:26.017247Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-06T19:10:27.300735Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-06T19:10:27.321466Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 
contentLength=136 level=info timestamp=2018-08-06T19:10:28.988556Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/08/06 19:10:29 http: TLS handshake error from 10.129.0.1:51756: EOF level=info timestamp=2018-08-06T19:10:31.114858Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-08-06T19:10:36.073622Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-controller-7d57d96b65-49lbl Pod phase: Running level=info timestamp=2018-08-06T19:09:41.024845Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmix8tcq kind= uid=456e8294-99ac-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:09:41.639609Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigmqst kind= uid=45caa3b9-99ac-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-06T19:09:41.639806Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigmqst kind= uid=45caa3b9-99ac-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:10:03.324436Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmij25fw kind= uid=52b97dd9-99ac-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-06T19:10:03.324586Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmij25fw kind= uid=52b97dd9-99ac-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:10:03.397200Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmij25fw\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmij25fw" level=info timestamp=2018-08-06T19:10:03.506281Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmij25fw\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmij25fw, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 52b97dd9-99ac-11e8-ab32-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmij25fw" level=info timestamp=2018-08-06T19:10:03.644181Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi59fpx kind= uid=52ea1935-99ac-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-06T19:10:03.644293Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi59fpx kind= uid=52ea1935-99ac-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:10:03.783049Z pos=vmi.go:157 
component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi59fpx\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi59fpx" level=info timestamp=2018-08-06T19:10:18.805172Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil8lqx kind= uid=5bf12d28-99ac-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-06T19:10:18.805350Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmil8lqx kind= uid=5bf12d28-99ac-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:10:36.574869Z pos=preset.go:142 component=virt-controller service=http namespace=kubevirt-test-default name=testvmin6wr6 kind= uid=668b3294-99ac-11e8-ab32-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-08-06T19:10:36.575016Z pos=preset.go:169 component=virt-controller service=http namespace=kubevirt-test-default name=testvmin6wr6 kind= uid=668b3294-99ac-11e8-ab32-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-08-06T19:10:36.650790Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmin6wr6\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmin6wr6" Pod name: virt-controller-7d57d96b65-zwjhl Pod phase: Running level=info timestamp=2018-08-06T18:21:47.664700Z pos=application.go:177 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-82vhp Pod phase: Running level=info timestamp=2018-08-06T19:10:36.580787Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n" level=info timestamp=2018-08-06T19:10:36.580825Z pos=vm.go:331 component=virt-handler msg="Domain status: Running, reason: Unknown\n" level=info timestamp=2018-08-06T19:10:36.580994Z pos=vm.go:353 component=virt-handler namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-08-06T19:10:36.581072Z pos=vm.go:407 component=virt-handler namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-08-06T19:10:36.582386Z pos=vm.go:556 component=virt-handler namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Grace period expired, killing deleted VirtualMachineInstance testvmil8lqx" level=info timestamp=2018-08-06T19:10:36.603846Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
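The virt-handler-82vhp sequence above shows the deletion path: the VMI object is already gone while its domain still runs, the handler processes the shutdown, and because the grace period has elapsed it kills the domain outright ("Grace period expired, killing deleted VirtualMachineInstance testvmil8lqx"). The gating decision is essentially a deadline comparison, along these lines (a sketch; the parameter names are illustrative stand-ins for the object's deletionTimestamp and deletionGracePeriodSeconds):

package main

import (
	"fmt"
	"time"
)

// gracePeriodExpired reports whether a deleted VMI may still shut down
// cleanly or must be killed immediately.
func gracePeriodExpired(deletedAt time.Time, gracePeriod time.Duration) bool {
	return time.Now().After(deletedAt.Add(gracePeriod))
}

func main() {
	deletedAt := time.Now().Add(-45 * time.Second) // deletion seen 45s ago
	if gracePeriodExpired(deletedAt, 30*time.Second) {
		fmt.Println("grace period expired, killing the domain")
	} else {
		fmt.Println("within grace period, waiting for guest shutdown")
	}
}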
level=info timestamp=2018-08-06T19:10:36.608269Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-08-06T19:10:36.608459Z pos=vm.go:756 component=virt-handler namespace=kubevirt-test-default name=testvmil8lqx kind=Domain uid=5bf12d28-99ac-11e8-ab32-525500d15501 msg="Domain is in state Shutoff reason Destroyed" level=info timestamp=2018-08-06T19:10:36.608511Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmil8lqx, existing: false\n" level=info timestamp=2018-08-06T19:10:36.608537Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n" level=info timestamp=2018-08-06T19:10:36.608573Z pos=vm.go:331 component=virt-handler msg="Domain status: Shutoff, reason: Destroyed\n" level=info timestamp=2018-08-06T19:10:36.608677Z pos=vm.go:358 component=virt-handler namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-08-06T19:10:36.608712Z pos=vm.go:410 component=virt-handler namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Processing deletion." level=info timestamp=2018-08-06T19:10:36.609194Z pos=vm.go:585 component=virt-handler namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Signaled deletion for testvmil8lqx" level=info timestamp=2018-08-06T19:10:36.613130Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-handler-lx6l5 Pod phase: Running level=info timestamp=2018-08-06T19:09:40.162995Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmibc7q2 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-08-06T19:09:40.163329Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmibc7q2 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-08-06T19:09:40.176052Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmixkm2m, existing: false\n" level=info timestamp=2018-08-06T19:09:40.176135Z pos=vm.go:329 component=virt-handler msg="Domain: existing: true\n" level=info timestamp=2018-08-06T19:09:40.176159Z pos=vm.go:331 component=virt-handler msg="Domain status: Shutoff, reason: Destroyed\n" level=info timestamp=2018-08-06T19:09:40.176252Z pos=vm.go:358 component=virt-handler namespace=kubevirt-test-default name=testvmixkm2m kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-08-06T19:09:40.176282Z pos=vm.go:410 component=virt-handler namespace=kubevirt-test-default name=testvmixkm2m kind=VirtualMachineInstance uid= msg="Processing deletion." 
level=info timestamp=2018-08-06T19:09:40.177716Z pos=vm.go:585 component=virt-handler namespace=kubevirt-test-default name=testvmixkm2m kind=VirtualMachineInstance uid= msg="Signaled deletion for testvmixkm2m" level=info timestamp=2018-08-06T19:09:40.181944Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-08-06T19:09:40.182238Z pos=vm.go:746 component=virt-handler namespace=kubevirt-test-default name=testvmixkm2m kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-08-06T19:09:40.184023Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmixkm2m kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-08-06T19:09:40.188271Z pos=vm.go:313 component=virt-handler msg="Processing vmi testvmixkm2m, existing: false\n" level=info timestamp=2018-08-06T19:09:40.188379Z pos=vm.go:329 component=virt-handler msg="Domain: existing: false\n" level=info timestamp=2018-08-06T19:09:40.188560Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmixkm2m kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-08-06T19:09:40.188782Z pos=vm.go:440 component=virt-handler namespace=kubevirt-test-default name=testvmixkm2m kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmi59fpx-lxwg4 Pod phase: Running Unable to retrieve container logs for docker://bd6f607f371ee2428499c2fdefd6cb4719f534981345a8462839802e7b8bee1e Pod name: virt-launcher-testvmil8lqx-hx8p8 Pod phase: Running level=info timestamp=2018-08-06T19:10:36.580144Z pos=server.go:96 component=virt-launcher namespace=kubevirt-test-default name=testvmil8lqx kind= uid=5bf12d28-99ac-11e8-ab32-525500d15501 msg="Signaled vmi kill" level=info timestamp=2018-08-06T19:10:36.583117Z pos=client.go:136 component=virt-launcher msg="Libvirt event 5 with reason 1 received" level=info timestamp=2018-08-06T19:10:36.602812Z pos=manager.go:306 component=virt-launcher namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Domain not running or paused, nothing to do." level=info timestamp=2018-08-06T19:10:36.602933Z pos=server.go:96 component=virt-launcher namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Signaled vmi kill" level=info timestamp=2018-08-06T19:10:36.607151Z pos=client.go:119 component=virt-launcher msg="domain status: 5:2" level=info timestamp=2018-08-06T19:10:36.608883Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-08-06T19:10:36.612664Z pos=manager.go:329 component=virt-launcher namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Domain undefined." level=info timestamp=2018-08-06T19:10:36.612800Z pos=server.go:140 component=virt-launcher namespace=kubevirt-test-default name=testvmil8lqx kind=VirtualMachineInstance uid= msg="Signaled vmi deletion" level=info timestamp=2018-08-06T19:10:36.857615Z pos=monitor.go:231 component=virt-launcher msg="Process 8bdc561c-6959-484c-9238-44e786197536 and pid 187 is gone!" level=info timestamp=2018-08-06T19:10:36.858480Z pos=client.go:136 component=virt-launcher msg="Libvirt event 1 with reason 0 received" level=info timestamp=2018-08-06T19:10:36.859107Z pos=virt-launcher.go:233 component=virt-launcher msg="Waiting on final notifications to be sent to virt-handler." 
level=info timestamp=2018-08-06T19:10:36.859917Z pos=client.go:119 component=virt-launcher msg="domain status: 0:0"
level=info timestamp=2018-08-06T19:10:36.861821Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-08-06T19:10:36.862032Z pos=virt-launcher.go:241 component=virt-launcher msg="Final Delete notification sent"
level=info timestamp=2018-08-06T19:10:36.862058Z pos=virt-launcher.go:348 component=virt-launcher msg=Exiting...
Pod name: virt-launcher-testvmin6wr6-x9rtz
Pod phase: Pending
• Failure [0.860 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74
should reject POST if schema is invalid [It]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:99
Expected
: 201
to equal
: 422
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:111
------------------------------
•••
------------------------------
• [SLOW TEST:52.660 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74
with boot order
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:174
should be able to boot from selected disk
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
Alpine as first boot
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:26.107 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74
with boot order
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:174
should be able to boot from selected disk
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
Cirros as first boot
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:16.123 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74
with user-data
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:205
without k8s secret
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:206
should retry starting the VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:207
------------------------------
• [SLOW TEST:17.462 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74
with user-data
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:205
without k8s secret
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:206
should log warning and proceed once the secret is there
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:237
------------------------------
• [SLOW TEST:47.726 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74
when virt-launcher crashes
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:285
should be stopped and have Failed phase
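The "should reject POST if schema is invalid" failure above is the notable red flag in this block: the apiserver answered 201 Created for a VMI the test considers schema-invalid, where validation should have rejected it with 422 Unprocessable Entity. Stripped of Ginkgo plumbing, the assertion is a status-code comparison like this (a sketch; 201 is the code actually returned per the failure above):

package main

import (
	"fmt"
	"net/http"
)

// checkRejected encodes the test's expectation: an invalid VMI POST must
// come back as 422 Unprocessable Entity, not as a successful create.
func checkRejected(statusCode int) error {
	if statusCode != http.StatusUnprocessableEntity { // 422
		return fmt.Errorf("expected %d to equal %d", statusCode, http.StatusUnprocessableEntity)
	}
	return nil
}

func main() {
	fmt.Println(checkRejected(http.StatusCreated)) // 201 reproduces the failure
}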
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:286 ------------------------------ • [SLOW TEST:30.164 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74 when virt-handler crashes /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:309 should recover and continue management /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:310 ------------------------------ • [SLOW TEST:10.841 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74 when virt-handler is responsive /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:340 should indicate that a node is ready for vmis /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:341 ------------------------------ • [SLOW TEST:79.010 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74 when virt-handler is not responsive /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:371 the node controller should react /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:410 ------------------------------ • [SLOW TEST:17.603 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74 with node tainted /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:463 the vmi with tolerations should be scheduled /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:485 ------------------------------ • ------------------------------ • [SLOW TEST:45.143 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74 with non default namespace /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:535 should log libvirt start and stop lifecycle events of the domain /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 kubevirt-test-default /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:40.036 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74 with non default namespace /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:535 should log libvirt start and stop lifecycle events of the domain /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 kubevirt-test-alternative /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.099 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74 VirtualMachineInstance Emulation Mode /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:592 should enable emulation in virt-launcher [BeforeEach] 
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:604 Software emulation is not enabled on this cluster /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:600 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.093 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74 VirtualMachineInstance Emulation Mode /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:592 should be reflected in domain XML [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:641 Software emulation is not enabled on this cluster /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:600 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.067 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:74 VirtualMachineInstance Emulation Mode /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:592 should request a TUN device but not KVM [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:685 Software emulation is not enabled on this cluster /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:600 ------------------------------ •••• ------------------------------ • [SLOW TEST:33.506 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Delete a VirtualMachineInstance's Pod /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:837 should result in the VirtualMachineInstance moving to a finalized state /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:838 ------------------------------ • [SLOW TEST:36.280 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52 Delete a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:869 with an active pod. 
• [SLOW TEST:36.280 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
  Delete a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:869
    with an active pod.
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:870
      should result in pod being terminated
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:871
------------------------------
• [SLOW TEST:50.638 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
  Delete a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:869
    with ACPI and 0 grace period seconds
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:895
      should result in vmi status failed
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:896
------------------------------
• [SLOW TEST:52.742 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
  Delete a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:869
    with ACPI and some grace period seconds
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:920
      should result in vmi status succeeded
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:921
------------------------------
• [SLOW TEST:21.804 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
  Delete a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:869
    with grace period greater than 0
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:945
      should run graceful shutdown
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:946
------------------------------
• [SLOW TEST:30.105 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
  Killed VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:997
    should be in Failed phase
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:998
------------------------------
• [SLOW TEST:25.999 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:52
  Killed VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:997
    should be left alone by virt-handler
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:1025
------------------------------
•
------------------------------
• [SLOW TEST:9.903 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should scale
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    to five, to six and then to zero replicas
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
••
------------------------------
• [SLOW TEST:17.931 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should update readyReplicas once VMIs are up
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:157
------------------------------
• [SLOW TEST:5.506 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should remove VMIs once it is marked for deletion
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:169
------------------------------
•
------------------------------
• [SLOW TEST:5.492 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should not scale when paused and scale when resume
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:223
------------------------------
•••••••••••••••
------------------------------
• [SLOW TEST:18.407 seconds]
VirtualMachine
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47
  A valid VirtualMachine given
  /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115
    should update VirtualMachine once VMIs are up
    /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:195
------------------------------
••
------------------------------
• [SLOW TEST:28.415 seconds]
VirtualMachine
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47
  A valid VirtualMachine given
  /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115
    should recreate VirtualMachineInstance if it gets deleted
    /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:245
------------------------------
• [SLOW TEST:54.574 seconds]
VirtualMachine
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47
  A valid VirtualMachine given
  /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115
    should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted
    /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:265
------------------------------
• [SLOW TEST:35.452 seconds]
VirtualMachine
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47
  A valid VirtualMachine given
  /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115
    should stop VirtualMachineInstance if running set to false
    /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:325
------------------------------
• [SLOW TEST:189.268 seconds]
VirtualMachine
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47
  A valid VirtualMachine given
  /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115
    should start and stop VirtualMachineInstance multiple times
    /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:333
------------------------------
• [SLOW TEST:38.332 seconds]
VirtualMachine
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47
  A valid VirtualMachine given
  /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115
    should not update the VirtualMachineInstance spec if Running
    /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:346
------------------------------
• [SLOW TEST:188.054 seconds]
VirtualMachine
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47
  A valid VirtualMachine given
  /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115
    should survive guest shutdown, multiple times
    /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:387
------------------------------
VM testvmi7t2js was scheduled to start
• [SLOW TEST:17.399 seconds]
VirtualMachine
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47
  A valid VirtualMachine given
  /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115
    Using virtctl interface
    /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435
      should start a VirtualMachineInstance once
      /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:436
------------------------------
VM testvmi96n9k was scheduled to stop
• [SLOW TEST:26.428 seconds]
VirtualMachine
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47
  A valid VirtualMachine given
  /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115
    Using virtctl interface
    /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435
      should stop a VirtualMachineInstance once
      /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:467
------------------------------
• [SLOW TEST:104.340 seconds]
Slirp
/root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:39
  should be able to
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    VirtualMachineInstance with slirp interface
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
•
------------------------------
• [SLOW TEST:62.039 seconds]
Health Monitoring
/root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:37
  A VirtualMachineInstance with a watchdog device
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:56
    should be shut down when the watchdog expires
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:57
------------------------------
• [SLOW TEST:35.518 seconds]
LeaderElection
/root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:43
  Start a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:53
    when the controller pod is not running
    /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:54
      should success
      /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:55
------------------------------
• [SLOW TEST:10.462 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
  With default kubevirt service accounts
  /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
    should verify permissions are correct for view, edit, and admin
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      given a vmi
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:8.172 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
  With default kubevirt service accounts
  /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
    should verify permissions are correct for view, edit, and admin
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      given an vm
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:8.074 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
  With default kubevirt service accounts
  /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
    should verify permissions are correct for view, edit, and admin
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      given a vmi preset
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:8.008 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
  With default kubevirt service accounts
  /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
    should verify permissions are correct for view, edit, and admin
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      given a vmi replica set
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
••••
------------------------------
• [SLOW TEST:17.529 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
  VMI definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
    with SM BIOS hook sidecar
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
      should successfully start with hook sidecar annotation
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:60
------------------------------
• [SLOW TEST:18.027 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
  VMI definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
    with SM BIOS hook sidecar
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
      should call Collect and OnDefineDomain on the hook sidecar
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:67
------------------------------
• [SLOW TEST:20.003 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
  VMI definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
    with SM BIOS hook sidecar
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
      should update domain XML with SM BIOS properties
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:83
------------------------------
Waiting for namespace kubevirt-test-default to be removed, this can take a while ...
Waiting for namespace kubevirt-test-alternative to be removed, this can take a while ...

Summarizing 3 Failures:

[Fail] Storage Starting a VirtualMachineInstance With ephemeral alpine PVC [It] should not persist data
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:1078

[Fail] Networking VirtualMachineInstance with custom MAC address in non-conventional format [It] should configure custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:1091

[Fail] VMIlifecycle Creating a VirtualMachineInstance [It] should reject POST if schema is invalid
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:111

Ran 140 of 151 Specs in 4477.771 seconds
FAIL! -- 137 Passed | 3 Failed | 0 Pending | 11 Skipped
--- FAIL: TestTests (4477.79s)
FAIL
make: *** [functest] Error 1
+ make cluster-down
./cluster/down.sh
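Of the 151 specs, 140 ran and 3 failed: the Storage ephemeral-PVC, Networking custom-MAC, and VMIlifecycle invalid-schema specs summarized above. A sketch for iterating on a single failure locally, assuming a FUNC_TEST_ARGS variable that the KubeVirt Makefile forwards to the compiled ginkgo test binary (the variable name is an assumption; check the Makefile in your checkout), with standard --ginkgo.focus regex matching:

  # Hypothetical re-run of only the invalid-schema lifecycle spec
  # against the same provider this job used; dots stand in for
  # spaces in the focus regex to sidestep shell word-splitting.
  export KUBEVIRT_PROVIDER=os-3.10.0
  make cluster-up
  FUNC_TEST_ARGS='--ginkgo.focus=should.reject.POST.if.schema.is.invalid' make functest
  make cluster-down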