+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ [[ openshift-3.10-release =~ openshift-.* ]]
+ [[ openshift-3.10-release =~ .*-crio-.* ]]
+ export KUBEVIRT_PROVIDER=os-3.10.0
+ KUBEVIRT_PROVIDER=os-3.10.0
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT SIGINT SIGTERM SIGSTOP
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading .......
Downloading .......
2018/07/26 18:41:01 Waiting for host: 192.168.66.102:22
2018/07/26 18:41:04 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 18:41:12 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 18:41:20 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 18:41:28 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 18:41:36 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 18:41:41 Connected to tcp://192.168.66.102:22
+ systemctl stop origin-node.service
+ rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/
++ docker ps -q
+ containers=
+ '[' -n '' ']'
++ docker ps -q -a
+ containers='2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3'
+ '[' -n '2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3' ']'
+ docker rm -f 2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3
2cfbef31c987
e183c40c07dc
861f604efed4
12902ad26342
028539b1f68b
bd6f07c1906c
d1f95a33a226
c43f96b6da26
e007e5cfd226
b42e2bceca6e
00531aec6f9a
e4ad39ba6cef
504c3df6bbf4
eb1ec0b445ce
b8955b91e8e5
f739ed8f3e59
07668d85ab3a
a6045d125d7b
2ce17110e009
b45f64ab28ef
3a15945be9e1
2a0af99ae1d1
0ece927846d7
0202d5f5dfae
8ce743769d8f
2efb36567bd8
96b65c0493c5
e9ce89fa30e3
2018/07/26 18:41:45 Waiting for host: 192.168.66.101:22
2018/07/26 18:41:48 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 18:41:56 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 18:42:04 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 18:42:12 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 18:42:17 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: connection refused. Sleeping 5s
2018/07/26 18:42:22 Connected to tcp://192.168.66.101:22
+ inventory_file=/root/inventory
+ openshift_ansible=/root/openshift-ansible
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=2.65 ms

--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 2.655/2.655/2.655/0.000 ms
Found node02. Adding it to the inventory.
+ '[' 0 -ne 0 ']'
+ nodes_found=true
+ set -e
+ echo '192.168.66.102 node02'
+ echo 'Found node02. Adding it to the inventory.'
+ echo 'node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102'
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable

--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms
+ '[' 1 -ne 0 ']'
+ break
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml

PLAY [Populate config host groups] *********************************************

TASK [Load group name mapping variables] ***************************************
ok: [localhost]

TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]

TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]

TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]

TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]

TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]

TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]

TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]

TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)

TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]

TASK [Evaluate oo_new_etcd_to_config] ******************************************

TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]

TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)

TASK [Evaluate oo_nodes_to_config]
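The xtrace above amounts to a small provisioning script: tear the cluster down on exit, wait for SSH on each VM, wipe old origin/etcd state and containers, probe for additional nodes by IP convention, append them to the Ansible inventory, and run the openshift-ansible scaleup playbook. Below is a minimal sketch of that flow, reconstructed only from the traced commands; the overall structure is illustrative rather than the actual cluster scripts, and where the trace does not show redirect targets (the echo lines) the destinations /etc/hosts and /root/inventory are assumptions.

```bash
#!/bin/bash
# Sketch reconstructed from the xtrace above; structure and redirect targets are assumptions.
set -e

inventory_file=/root/inventory
openshift_ansible=/root/openshift-ansible

# Always tear the cluster down when the job ends.
# (The traced trap also lists SIGSTOP, but SIGSTOP cannot actually be trapped.)
trap '{ make cluster-down; }' EXIT SIGINT SIGTERM

# Declare a [new_nodes] group for the scaleup inventory.
echo '[new_nodes]' >> "$inventory_file"                       # redirect target assumed
sed -i '/\[OSEv3:children\]/a new_nodes' "$inventory_file"

# Probe node02, node03, ... using the 192.168.66.1XX convention; stop at the first
# unreachable address and record every node that answered.
nodes_found=false
for i in $(seq 2 100); do
    node=$(printf node%02d "$i")
    node_ip=$(printf 192.168.66.1%02d "$i")
    set +e
    ping "$node_ip" -c 1
    reachable=$?
    set -e
    if [ "$reachable" -ne 0 ]; then
        break
    fi
    nodes_found=true
    echo "Found ${node}. Adding it to the inventory."
    echo "${node_ip} ${node}" >> /etc/hosts                    # destination assumed
    echo "${node} openshift_node_group_name=\"node-config-compute\" openshift_schedulable=true openshift_ip=${node_ip}" >> "$inventory_file"
done

# Scale the discovered nodes into the running cluster.
if [ "$nodes_found" = "true" ]; then
    ansible-playbook -i "$inventory_file" "$openshift_ansible/playbooks/openshift-node/scaleup.yml"
fi
```

The scaleup playbook output that follows is the direct result of this inventory: node02 lands in [new_nodes] and is evaluated into oo_nodes_to_config, while node01 remains the existing master.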
********************************************* ok: [localhost] => (item=node02) TASK [Evaluate oo_nodes_to_bootstrap] ****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) PLAY [Ensure there are new_nodes] ********************************************** TASK [fail] ******************************************************************** skipping: [localhost] TASK [fail] ******************************************************************** skipping: [localhost] PLAY [Initialization Checkpoint Start] ***************************************** TASK [Set install initialization 'In Progress'] ******************************** ok: [node01] PLAY [Populate config host groups] ********************************************* TASK [Load group name mapping variables] *************************************** ok: [localhost] TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] ********* skipping: [localhost] TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_lb_hosts required] *********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts required] ********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts is single host] **************************** skipping: [localhost] TASK [Evaluate groups - g_glusterfs_hosts required] **************************** skipping: [localhost] TASK [Evaluate oo_all_hosts] *************************************************** ok: [localhost] => (item=node01) ok: [localhost] => (item=node02) TASK [Evaluate oo_masters] ***************************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_master] ************************************************ ok: [localhost] TASK [Evaluate oo_new_etcd_to_config] ****************************************** TASK [Evaluate oo_masters_to_config] ******************************************* ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_to_config] ********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_etcd] ************************************************** ok: [localhost] TASK [Evaluate oo_etcd_hosts_to_upgrade] *************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_hosts_to_backup] **************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_nodes_to_config] ********************************************* ok: [localhost] => (item=node02) TASK [Evaluate oo_nodes_to_bootstrap] ****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** 
ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) [WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config PLAY [Ensure that all non-node hosts are accessible] *************************** TASK [Gathering Facts] ********************************************************* ok: [node01] PLAY [Initialize basic host facts] ********************************************* TASK [Gathering Facts] ********************************************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02 TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] *** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : debug] ************************************ skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_stats] ******************************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] *** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02 included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02 TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Standardize on latest variable names] ***** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Normalize openshift_release] ************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02 TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* skipping: [node01] => (item=openshift_hosted_etcd_storage_kind) skipping: [node02] => (item=openshift_hosted_etcd_storage_kind) TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and 
ansible_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : ensure provider configuration variables are defined] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure removed web console extension variables are not set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that web console port matches API server port] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : At least one master is schedulable] ******* skipping: [node01] skipping: [node02] TASK [Detecting Operating System from ostree_booted] *************************** ok: [node02] ok: [node01] TASK [set openshift_deployment_type if unset] ********************************** skipping: [node01] skipping: [node02] TASK [check for node already bootstrapped] ************************************* ok: [node02] ok: [node01] TASK [initialize_facts set fact openshift_is_bootstrapped] ********************* ok: [node01] ok: [node02] TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] *** ok: [node01] ok: [node02] TASK [Determine Atomic Host Docker Version] ************************************ skipping: [node01] skipping: [node02] TASK [assert atomic host docker version is 1.12 or later] ********************** skipping: [node01] skipping: [node02] PLAY [Retrieve existing master configs and validate] *************************** TASK [openshift_control_plane : stat] ****************************************** ok: [node01] TASK [openshift_control_plane : slurp] ***************************************** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] *** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Initialize special first-master variables] ******************************* TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Disable web console if required] ***************************************** TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Setup yum repositories for all hosts] ************************************ TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Install Red Hat Subscription manager] ******************* skipping: [node02] TASK [rhel_subscribe : Is host already registered?] 
**************************** skipping: [node02] TASK [rhel_subscribe : Register host] ****************************************** skipping: [node02] TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] *********** skipping: [node02] TASK [rhel_subscribe : Attach to OpenShift Pool] ******************************* skipping: [node02] TASK [rhel_subscribe : Satellite preparation] ********************************** skipping: [node02] TASK [openshift_repos : openshift_repos detect ostree] ************************* ok: [node02] TASK [openshift_repos : Ensure libselinux-python is installed] ***************** ok: [node02] TASK [openshift_repos : Remove openshift_additional.repo file] ***************** ok: [node02] TASK [openshift_repos : Create any additional repos that are defined] ********** TASK [openshift_repos : include_tasks] ***************************************** skipping: [node02] TASK [openshift_repos : include_tasks] ***************************************** included: /root/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02 TASK [openshift_repos : Configure origin gpg keys] ***************************** ok: [node02] TASK [openshift_repos : Configure correct origin release repository] *********** ok: [node02] => (item=/root/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2) TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] *** changed: [node02] => { "msg": "First run of openshift_repos" } TASK [openshift_repos : Record that openshift_repos already ran] *************** ok: [node02] RUNNING HANDLER [openshift_repos : refresh cache] ****************************** changed: [node02] PLAY [Install packages necessary for installer] ******************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Determine if chrony is installed] **************************************** changed: [node02] [WARNING]: Consider using the yum, dnf or zypper module rather than running rpm. If you need to use command because yum, dnf or zypper is insufficient you can add warn=False to this command task or set command_warnings=False in ansible.cfg to get rid of this message. 
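The [WARNING] above is Ansible's standard nudge when a play shells out to rpm instead of using the yum/dnf module (here, the chrony detection). It is harmless, but if the extra noise in CI logs is unwanted, the warning's own suggestion can be applied. A minimal sketch, assuming Ansible 2.x where the command_warnings setting and its ANSIBLE_COMMAND_WARNINGS environment variable still exist (both were deprecated and later removed in newer Ansible releases):

```bash
# Silence the "Consider using the yum, dnf or zypper module..." warning for this run.
# Assumes Ansible 2.x.
ANSIBLE_COMMAND_WARNINGS=False \
    ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml

# Equivalent persistent form in ansible.cfg:
#   [defaults]
#   command_warnings = False
```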
TASK [Install ntp package] ***************************************************** skipping: [node02] TASK [Start and enable ntpd/chronyd] ******************************************* changed: [node02] TASK [Ensure openshift-ansible installer package deps are installed] *********** ok: [node02] => (item=iproute) ok: [node02] => (item=dbus-python) ok: [node02] => (item=PyYAML) ok: [node02] => (item=python-ipaddress) ok: [node02] => (item=libsemanage-python) ok: [node02] => (item=yum-utils) ok: [node02] => (item=python-docker) PLAY [Initialize cluster facts] ************************************************ TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [get openshift_current_version] ******************************************* ok: [node02] ok: [node01] TASK [set_fact openshift_portal_net if present on masters] ********************* ok: [node01] ok: [node02] TASK [Gather Cluster facts] **************************************************** changed: [node02] changed: [node01] TASK [Set fact of no_proxy_internal_hostnames] ********************************* skipping: [node01] skipping: [node02] TASK [Initialize openshift.node.sdn_mtu] *************************************** changed: [node02] ok: [node01] PLAY [Initialize etcd host variables] ****************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role : openshift_version] **************************************** TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** ok: [node01] TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] => { "msg": "openshift_pkg_version was not defined. 
Falling back to -3.10.0" } TASK [openshift_version : set_fact] ******************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Verify Requirements] ***************************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [Run variable sanity checks] ********************************************** ok: [node01] TASK [Validate openshift_node_groups and openshift_node_group_name] ************ ok: [node01] PLAY [Initialization Checkpoint End] ******************************************* TASK [Set install initialization 'Complete'] *********************************** ok: [node01] PLAY [Validate node hostnames] ************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Query DNS for IP address of node02] ************************************** ok: [node02] TASK [Validate openshift_hostname when defined] ******************************** skipping: [node02] TASK [Validate openshift_ip exists on node when defined] *********************** skipping: [node02] PLAY [Configure os_firewall] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [os_firewall : Detecting Atomic Host Operating System] ******************** ok: [node02] TASK [os_firewall : Set fact r_os_firewall_is_atomic] ************************** ok: [node02] TASK [os_firewall : Fail - Firewalld is not supported on Atomic Host] ********** skipping: [node02] TASK [os_firewall : Install firewalld packages] ******************************** skipping: [node02] TASK [os_firewall : Ensure iptables services are not enabled] ****************** skipping: [node02] => (item=iptables) skipping: [node02] => (item=ip6tables) TASK [os_firewall : Wait 10 seconds after disabling iptables] ****************** skipping: [node02] TASK [os_firewall : Start and enable firewalld service] ************************ skipping: [node02] TASK 
[os_firewall : need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail] *** skipping: [node02] TASK [os_firewall : Restart polkitd] ******************************************* skipping: [node02] TASK [os_firewall : Wait for polkit action to have been created] *************** skipping: [node02] TASK [os_firewall : Ensure firewalld service is not enabled] ******************* ok: [node02] TASK [os_firewall : Wait 10 seconds after disabling firewalld] ***************** skipping: [node02] TASK [os_firewall : Install iptables packages] ********************************* ok: [node02] => (item=iptables) ok: [node02] => (item=iptables-services) TASK [os_firewall : Start and enable iptables service] ************************* ok: [node02 -> node02] => (item=node02) TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] *** skipping: [node02] PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [container_runtime : Setup the docker-storage for overlay] **************** skipping: [node02] TASK [container_runtime : Create file system on extra volume device] *********** TASK [container_runtime : Create mount entry for extra volume] ***************** PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** ok: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* ok: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ****************** skipping: [node02] TASK [container_runtime : Add https_proxy to /etc/atomic.conf] ***************** skipping: [node02] TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ******************** skipping: [node02] TASK [container_runtime : Get current installed Docker version] **************** ok: [node02] TASK [container_runtime : Error out if Docker pre-installed but too old] ******* skipping: [node02] TASK [container_runtime : Error out if requested Docker is too old] ************ skipping: [node02] TASK [container_runtime : Install Docker] ************************************** skipping: [node02] TASK [container_runtime : Ensure 
docker.service.d directory exists] ************ ok: [node02] TASK [container_runtime : Configure Docker service unit file] ****************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Set registry params] ********************************* skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []}) TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] *** skipping: [node02] TASK [container_runtime : Set Proxy Settings] ********************************** skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''}) TASK [container_runtime : Set various Docker options] ************************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Configure Docker Network OPTIONS] ******************** ok: [node02] TASK [container_runtime : Detect if docker is already started] ***************** ok: [node02] TASK [container_runtime : Start the Docker service] **************************** ok: [node02] TASK [container_runtime : set_fact] ******************************************** ok: [node02] TASK [container_runtime : Check for docker_storage_path/overlay2] ************** ok: [node02] TASK [container_runtime : Fixup SELinux permissions for docker] **************** changed: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* ok: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** ok: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** ok: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] TASK [container_runtime : Fail if Atomic Host since this is an rpm request] **** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create 
systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Check that overlay is in the kernel] ***************** skipping: [node02] TASK [container_runtime : Add overlay to modprobe.d] *************************** skipping: [node02] TASK [container_runtime : Manually modprobe overlay into the kernel] *********** skipping: [node02] TASK [container_runtime : Enable and start systemd-modules-load] *************** skipping: [node02] TASK [container_runtime : Install cri-o] *************************************** skipping: [node02] TASK [container_runtime : Remove CRI-O default configuration files] ************ skipping: [node02] => (item=/etc/cni/net.d/200-loopback.conf) skipping: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf) TASK [container_runtime : Create the CRI-O configuration] ********************** skipping: [node02] TASK [container_runtime : Ensure CNI configuration directory exists] *********** skipping: [node02] TASK [container_runtime : Add iptables allow rules] **************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove iptables rules] ******************************* TASK [container_runtime : Add firewalld allow rules] *************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove firewalld allow rules] ************************ TASK [container_runtime : Configure the CNI network] *************************** skipping: [node02] TASK [container_runtime : Create /etc/sysconfig/crio-network] ****************** skipping: [node02] TASK [container_runtime : Start the CRI-O service] ***************************** skipping: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* skipping: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** skipping: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** skipping: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role : openshift_version] **************************************** TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** skipping: [node01] 
TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Node Preparation Checkpoint Start] *************************************** TASK [Set Node preparation 'In Progress'] ************************************** ok: [node01] PLAY [Only target nodes that have not yet been bootstrapped] ******************* TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [add_host] **************************************************************** skipping: [localhost] => (item=node02) ok: [localhost] => (item=node01) PLAY [Disable excluders] ******************************************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02 TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-docker-excluder version detected] 
************ skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-docker-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-excluder version detected] ******************* skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** skipping: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] PLAY [Configure nodes] ********************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_cloud_provider : Set cloud provider facts] ********************* skipping: [node02] TASK [openshift_cloud_provider : Create cloudprovider config dir] ************** skipping: [node02] TASK [openshift_cloud_provider : include the defined cloud provider files] ***** skipping: [node02] TASK [openshift_node : fail] *************************************************** skipping: [node02] TASK [openshift_node : Check for NetworkManager service] *********************** ok: [node02] TASK [openshift_node : Set fact using_network_manager] ************************* ok: [node02] TASK [openshift_node : Install dnsmasq] **************************************** ok: [node02] TASK [openshift_node : ensure origin/node directory exists] ******************** changed: [node02] => (item=/etc/origin) 
changed: [node02] => (item=/etc/origin/node) TASK [openshift_node : Install NetworkManager during node_bootstrap provisioning] *** skipping: [node02] TASK [openshift_node : Install network manager dispatch script] **************** skipping: [node02] TASK [openshift_node : Install dnsmasq configuration] ************************** ok: [node02] TASK [openshift_node : Deploy additional dnsmasq.conf] ************************* skipping: [node02] TASK [openshift_node : Enable dnsmasq] ***************************************** ok: [node02] TASK [openshift_node : Install network manager dispatch script] **************** ok: [node02] TASK [openshift_node : Add iptables allow rules] ******************************* ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) ok: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove iptables rules] ********************************** TASK [openshift_node : Add firewalld allow rules] ****************************** skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) skipping: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove firewalld allow rules] *************************** TASK [openshift_node : Checking for journald.conf] ***************************** ok: [node02] TASK [openshift_node : Create journald persistence directories] **************** ok: [node02] TASK [openshift_node : Update journald setup] ********************************** ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'}) ok: [node02] => (item={u'var': u'Compress', u'val': True}) ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000}) ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'}) ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'}) ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'}) ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'}) ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'}) ok: [node02] => (item={u'var': u'ForwardToSyslog', 
u'val': False}) ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False}) TASK [openshift_node : Restart journald] *************************************** skipping: [node02] TASK [openshift_node : Disable swap] ******************************************* ok: [node02] TASK [openshift_node : Install node, clients, and conntrack packages] ********** ok: [node02] => (item={u'name': u'origin-node-3.10.0*'}) ok: [node02] => (item={u'name': u'origin-clients-3.10.0*'}) ok: [node02] => (item={u'name': u'conntrack-tools'}) TASK [openshift_node : Restart cri-o] ****************************************** skipping: [node02] TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] *** changed: [node02] TASK [openshift_node : sysctl] ************************************************* ok: [node02] TASK [openshift_node : Check for credentials file for registry auth] *********** skipping: [node02] TASK [openshift_node : Create credentials for registry auth] ******************* skipping: [node02] TASK [openshift_node : Create credentials for registry auth (alternative)] ***** skipping: [node02] TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] *** skipping: [node02] TASK [openshift_node : Check that node image is present] *********************** changed: [node02] TASK [openshift_node : Pre-pull node image] ************************************ skipping: [node02] TASK [openshift_node : Copy node script to the node] *************************** ok: [node02] TASK [openshift_node : Install Node service file] ****************************** ok: [node02] TASK [openshift_node : Ensure old system path is set] ************************** skipping: [node02] => (item=/etc/origin/openvswitch) skipping: [node02] => (item=/var/lib/kubelet) skipping: [node02] => (item=/opt/cni/bin) TASK [openshift_node : Check status of node image pre-pull] ******************** skipping: [node02] TASK [openshift_node : Copy node container image to ostree storage] ************ skipping: [node02] TASK [openshift_node : Install or Update node system container] **************** skipping: [node02] TASK [openshift_node : Restart network manager to ensure networking configuration is in place] *** skipping: [node02] TASK [openshift_node : Configure Node settings] ******************************** ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS='}) ok: [node02] => (item={u'regex': u'^DEBUG_LOGLEVEL=', u'line': u'DEBUG_LOGLEVEL=2'}) ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.10.0-rc.0'}) TASK [openshift_node : Configure Proxy Settings] ******************************* skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='}) skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='}) skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'}) TASK [openshift_node : file] *************************************************** skipping: [node02] TASK [openshift_node : Create the Node config] ********************************* changed: [node02] TASK [openshift_node : Configure Node Environment Variables] ******************* TASK [openshift_node : Ensure the node static pod directory exists] ************ changed: [node02] TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : Check status of node image pre-pull] 
******************** skipping: [node02] TASK [openshift_node : Install NFS storage plugin dependencies] **************** ok: [node02] TASK [openshift_node : Check for existence of nfs sebooleans] ****************** ok: [node02] => (item=virt_use_nfs) ok: [node02] => (item=virt_sandbox_use_nfs) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 18:51:33.708517', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.015089', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 18:51:33.693428', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 18:51:35.255299', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.007569', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 18:51:35.247730', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 18:51:33.708517', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.015089', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 18:51:33.693428', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 18:51:35.255299', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.007569', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 
18:51:35.247730', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install GlusterFS storage plugin dependencies] ********** ok: [node02] TASK [openshift_node : Check for existence of fusefs sebooleans] *************** ok: [node02] => (item=virt_use_fusefs) ok: [node02] => (item=virt_sandbox_use_fusefs) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 18:51:43.992077', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.006314', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 18:51:43.985763', '_ansible_ignore_errors': None, 'failed': False}) ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 18:51:45.511689', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.011021', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 18:51:45.500668', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 18:51:43.992077', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.006314', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 18:51:43.985763', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 18:51:45.511689', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.011021', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, 
u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 18:51:45.500668', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install Ceph storage plugin dependencies] *************** ok: [node02] TASK [openshift_node : Install iSCSI storage plugin dependencies] ************** ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=device-mapper-multipath) TASK [openshift_node : restart services] *************************************** ok: [node02] => (item=multipathd) ok: [node02] => (item=rpcbind) ok: [node02] => (item=iscsid) TASK [openshift_node : Template multipath configuration] *********************** changed: [node02] TASK [openshift_node : Enable and start multipath] ***************************** changed: [node02] TASK [tuned : Check for tuned package] ***************************************** ok: [node02] TASK [tuned : Set tuned OpenShift variables] *********************************** ok: [node02] TASK [tuned : Ensure directory structure exists] ******************************* ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': 
u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Ensure files are populated from templates] *********************** skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Make tuned use the 
recommended tuned profile on restart] ********* changed: [node02] => (item=/etc/tuned/active_profile) changed: [node02] => (item=/etc/tuned/profile_mode) TASK [tuned : Restart tuned service] ******************************************* changed: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] ******* ok: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] *** PLAY [node bootstrap config] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_node : install needed rpm(s)] ********************************** ok: [node02] => (item=origin-node) ok: [node02] => (item=origin-docker-excluder) ok: [node02] => (item=ansible) ok: [node02] => (item=bash-completion) ok: [node02] => (item=docker) ok: [node02] => (item=haproxy) ok: [node02] => (item=dnsmasq) ok: [node02] => (item=ntp) ok: [node02] => (item=logrotate) ok: [node02] => (item=httpd-tools) ok: [node02] => (item=bind-utils) ok: [node02] => (item=firewalld) ok: [node02] => (item=libselinux-python) ok: [node02] => (item=conntrack-tools) ok: [node02] => (item=openssl) ok: [node02] => (item=iproute) ok: [node02] => (item=python-dbus) ok: [node02] => (item=PyYAML) ok: [node02] => (item=yum-utils) ok: [node02] => (item=glusterfs-fuse) ok: [node02] => (item=device-mapper-multipath) ok: [node02] => (item=nfs-utils) ok: [node02] => (item=cockpit-ws) ok: [node02] => (item=cockpit-system) ok: [node02] => (item=cockpit-bridge) ok: [node02] => (item=cockpit-docker) ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=ceph-common) TASK [openshift_node : create the directory for node] ************************** skipping: [node02] TASK [openshift_node : laydown systemd override] ******************************* skipping: [node02] TASK [openshift_node : update the sysconfig to have necessary variables] ******* ok: [node02] => (item={u'regexp': u'^KUBECONFIG=.*', u'line': u'KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig'}) TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : disable origin-node service] **************************** changed: [node02] => (item=origin-node.service) TASK [openshift_node : Check for RPM generated config marker file .config_managed] *** ok: [node02] TASK [openshift_node : create directories for bootstrapping] ******************* ok: [node02] => (item=/root/openshift_bootstrap) changed: [node02] => (item=/var/lib/origin/openshift.local.config) changed: [node02] => (item=/var/lib/origin/openshift.local.config/node) ok: [node02] => (item=/etc/docker/certs.d/docker-registry.default.svc:5000) TASK [openshift_node : laydown the bootstrap.yml file for on boot configuration] *** ok: [node02] TASK [openshift_node : Create a symlink to the node client CA for the docker registry] *** ok: [node02] TASK [openshift_node : Remove RPM generated config files if present] *********** skipping: [node02] => (item=master) skipping: [node02] => (item=.config_managed) TASK [openshift_node : find all files in /etc/origin/node so we can remove them] *** skipping: [node02] TASK [openshift_node : Remove everything except the resolv.conf required for node] *** skipping: [node02] TASK [openshift_node_group : create node config template] ********************** changed: [node02] TASK [openshift_node_group : remove existing node config] 
********************** changed: [node02] TASK [openshift_node_group : Ensure required directories are present] ********** ok: [node02] => (item=/etc/origin/node/pods) changed: [node02] => (item=/etc/origin/node/certificates) TASK [openshift_node_group : Update the sysconfig to group "node-config-compute"] *** changed: [node02] TASK [set_fact] **************************************************************** ok: [node02] PLAY [Re-enable excluder if it was previously enabled] ************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] PLAY [Node Preparation Checkpoint End] ***************************************** TASK [Set Node preparation 'Complete'] ***************************************** ok: [node01] PLAY [Distribute bootstrap and start nodes] ************************************ TASK [openshift_node : Gather node information] ******************************** changed: [node02] ok: [node01] TASK [openshift_node : Copy master bootstrap config locally] ******************* ok: [node02] TASK [openshift_node : Distribute bootstrap kubeconfig if one does not exist] *** ok: [node01] changed: [node02] TASK [openshift_node : Start and enable node for bootstrapping] **************** changed: [node02] changed: [node01] TASK [openshift_node : Get node logs] ****************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : debug] ************************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : fail] *************************************************** skipping: [node02] skipping: [node01] PLAY [Approve any pending CSR requests from inventory nodes] ******************* TASK [Dump all candidate bootstrap hostnames] ********************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Find all hostnames for bootstrapping] ************************************ ok: [node01] TASK [Dump the 
bootstrap hostnames] ******************************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Approve bootstrap nodes] ************************************************* changed: [node01] TASK [Get CSRs] **************************************************************** skipping: [node01] TASK [Report approval errors] ************************************************** skipping: [node01] PLAY [Ensure any inventory labels are applied to the nodes] ******************** TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [openshift_manage_node : Wait for master API to become available before proceeding] *** skipping: [node02] TASK [openshift_manage_node : Wait for Node Registration] ********************** ok: [node02 -> node01] ok: [node01 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02, node01 TASK [openshift_manage_node : Set node schedulability] ************************* ok: [node01 -> node01] ok: [node02 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/set_default_node_role.yml for node02, node01 TASK [openshift_manage_node : Retrieve nodes that are marked with the infra selector or the legacy infra selector] *** ok: [node02 -> node01] TASK [openshift_manage_node : Label infra or legacy infra nodes with the new role label] *** TASK [openshift_manage_node : Retrieve non-infra, non-master nodes that are not yet labeled compute] *** ok: [node02 -> node01] TASK [openshift_manage_node : label non-master non-infra nodes compute] ******** TASK [openshift_manage_node : Label all-in-one master as a compute node] ******* skipping: [node02] PLAY RECAP ********************************************************************* localhost : ok=30 changed=0 unreachable=0 failed=0 node01 : ok=71 changed=3 unreachable=0 failed=0 node02 : ok=155 changed=33 unreachable=0 failed=0 INSTALLER STATUS *************************************************************** Initialization : Complete (0:04:12) Node Preparation : Complete (0:05:25) Sending file modes: C0755 110489328 oc Sending file modes: C0600 5649 admin.kubeconfig Cluster "node01:8443" set. Cluster "node01:8443" set. + set +e + kubectl get nodes --no-headers + cluster/kubectl.sh get nodes --no-headers node01 Ready compute,infra,master 18d v1.10.0+b81c8f8 node02 Ready compute 1m v1.10.0+b81c8f8 + kubectl_rc=0 + '[' 0 -ne 0 ']' ++ kubectl get nodes --no-headers ++ cluster/kubectl.sh get nodes --no-headers ++ grep NotReady + '[' -n '' ']' + set -e + echo 'Nodes are ready:' Nodes are ready: + kubectl get nodes + cluster/kubectl.sh get nodes NAME STATUS ROLES AGE VERSION node01 Ready compute,infra,master 18d v1.10.0+b81c8f8 node02 Ready compute 1m v1.10.0+b81c8f8 + make cluster-sync ./cluster/build.sh Building ... 
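For readers following the trace, the node-readiness gate above reduces to a small shell guard: call kubectl through the cluster/kubectl.sh wrapper, stop if the call itself fails, and continue only once no node reports NotReady. Below is a minimal sketch of that logic, reconstructed from the "+" trace lines; the failure branches are assumptions (the log only shows the success path), and the real script in the KubeVirt cluster tooling may be structured differently.

    #!/bin/bash
    # Sketch of the readiness check implied by the trace above (not the verbatim script).
    set +e                                    # tolerate a failing kubectl call so its rc can be inspected
    cluster/kubectl.sh get nodes --no-headers
    kubectl_rc=$?
    if [ "$kubectl_rc" -ne 0 ]; then
        echo "Error: kubectl could not reach the cluster"    # assumed error handling
        exit 1
    fi
    # Any node still reporting NotReady means the cluster is not usable yet.
    not_ready=$(cluster/kubectl.sh get nodes --no-headers | grep NotReady)
    if [ -n "$not_ready" ]; then
        echo "Error: some nodes are still NotReady"          # assumed error handling
        exit 1
    fi
    set -e
    echo 'Nodes are ready:'
    cluster/kubectl.sh get nodes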
Sending build context to Docker daemon 5.632 kB Step 1/12 : FROM fedora:28 ---> cc510acfcd70 Step 2/12 : ENV LIBVIRT_VERSION 4.2.0 ---> Using cache ---> b1088795aeb6 Step 3/12 : RUN curl --output /etc/yum.repos.d/fedora-virt-preview.repo https://fedorapeople.org/groups/virt/virt-preview/fedora-virt-preview.repo ---> Using cache ---> 88f43b954f9f Step 4/12 : RUN dnf -y install libvirt-devel-${LIBVIRT_VERSION} make git mercurial sudo gcc findutils gradle rsync-daemon rsync qemu-img protobuf-compiler && dnf -y clean all ---> Using cache ---> 06c70b43758a Step 5/12 : ENV GIMME_GO_VERSION 1.10 ---> Using cache ---> e5b3ae738662 Step 6/12 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> 3e3d43f49e45 Step 7/12 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 73e8a3aa263a Step 8/12 : ADD rsyncd.conf /etc/rsyncd.conf ---> Using cache ---> bc244b1c712b Step 9/12 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/mattn/goveralls && go get -u github.com/Masterminds/glide && go get golang.org/x/tools/cmd/goimports && git clone https://github.com/mvdan/sh.git $GOPATH/src/mvdan.cc/sh && cd /go/src/mvdan.cc/sh/cmd/shfmt && git checkout v2.5.0 && go get mvdan.cc/sh/cmd/shfmt && go install && go get -u github.com/golang/mock/gomock && go get -u github.com/rmohr/mock/mockgen && go get -u github.com/rmohr/go-swagger-utils/swagger-doc && go get -u github.com/onsi/ginkgo/ginkgo && go get -u -d k8s.io/code-generator/cmd/deepcopy-gen && go get -u -d k8s.io/code-generator/cmd/defaulter-gen && go get -u -d k8s.io/code-generator/cmd/openapi-gen && cd /go/src/k8s.io/code-generator/cmd/deepcopy-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/defaulter-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/openapi-gen && git checkout release-1.9 && go install && go get -u -d github.com/golang/protobuf/protoc-gen-go && cd /go/src/github.com/golang/protobuf/protoc-gen-go && git checkout 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 && go install ---> Using cache ---> 4cd1786b2bc8 Step 10/12 : RUN pip install j2cli ---> Using cache ---> b51a532fa53a Step 11/12 : ADD entrypoint.sh /entrypoint.sh ---> Using cache ---> 3bc0185264f6 Step 12/12 : ENTRYPOINT /entrypoint.sh ---> Using cache ---> dcf2b21fa2ed Successfully built dcf2b21fa2ed go version go1.10 linux/amd64 go version go1.10 linux/amd64 make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh Sending build context to Docker daemon 5.632 kB Step 1/12 : FROM fedora:28 ---> cc510acfcd70 Step 2/12 : ENV LIBVIRT_VERSION 4.2.0 ---> Using cache ---> b1088795aeb6 Step 3/12 : RUN curl --output /etc/yum.repos.d/fedora-virt-preview.repo https://fedorapeople.org/groups/virt/virt-preview/fedora-virt-preview.repo ---> Using cache ---> 88f43b954f9f Step 4/12 : RUN dnf -y install libvirt-devel-${LIBVIRT_VERSION} make git mercurial sudo gcc findutils gradle rsync-daemon rsync qemu-img protobuf-compiler && dnf -y clean all ---> Using cache ---> 06c70b43758a Step 5/12 : ENV GIMME_GO_VERSION 1.10 ---> Using cache ---> e5b3ae738662 Step 6/12 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> 
Using cache ---> 3e3d43f49e45 Step 7/12 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 73e8a3aa263a Step 8/12 : ADD rsyncd.conf /etc/rsyncd.conf ---> Using cache ---> bc244b1c712b Step 9/12 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/mattn/goveralls && go get -u github.com/Masterminds/glide && go get golang.org/x/tools/cmd/goimports && git clone https://github.com/mvdan/sh.git $GOPATH/src/mvdan.cc/sh && cd /go/src/mvdan.cc/sh/cmd/shfmt && git checkout v2.5.0 && go get mvdan.cc/sh/cmd/shfmt && go install && go get -u github.com/golang/mock/gomock && go get -u github.com/rmohr/mock/mockgen && go get -u github.com/rmohr/go-swagger-utils/swagger-doc && go get -u github.com/onsi/ginkgo/ginkgo && go get -u -d k8s.io/code-generator/cmd/deepcopy-gen && go get -u -d k8s.io/code-generator/cmd/defaulter-gen && go get -u -d k8s.io/code-generator/cmd/openapi-gen && cd /go/src/k8s.io/code-generator/cmd/deepcopy-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/defaulter-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/openapi-gen && git checkout release-1.9 && go install && go get -u -d github.com/golang/protobuf/protoc-gen-go && cd /go/src/github.com/golang/protobuf/protoc-gen-go && git checkout 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 && go install ---> Using cache ---> 4cd1786b2bc8 Step 10/12 : RUN pip install j2cli ---> Using cache ---> b51a532fa53a Step 11/12 : ADD entrypoint.sh /entrypoint.sh ---> Using cache ---> 3bc0185264f6 Step 12/12 : ENTRYPOINT /entrypoint.sh ---> Using cache ---> dcf2b21fa2ed Successfully built dcf2b21fa2ed go version go1.10 linux/amd64 go version go1.10 linux/amd64 find: '/root/go/src/kubevirt.io/kubevirt/_out/cmd': No such file or directory Compiling tests... 
compiled tests.test hack/build-docker.sh build Sending build context to Docker daemon 40.38 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> bfe77d5699ed Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> b00c84523b53 Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> b76b8bd8cd39 Step 5/8 : USER 1001 ---> Using cache ---> b6d9ad9ed232 Step 6/8 : COPY virt-controller /usr/bin/virt-controller ---> 0b32ecf97795 Removing intermediate container dd554fa49f8c Step 7/8 : ENTRYPOINT /usr/bin/virt-controller ---> Running in 0082777e3462 ---> 7bde39d4e7b2 Removing intermediate container 0082777e3462 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-controller" '' ---> Running in 1dbbb409c7c3 ---> 5ff2eebc780a Removing intermediate container 1dbbb409c7c3 Successfully built 5ff2eebc780a Sending build context to Docker daemon 43.3 MB Step 1/10 : FROM kubevirt/libvirt:4.2.0 ---> 5f0bfe81a3e0 Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 945996802736 Step 3/10 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool net-tools sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> 672f9ab56316 Step 4/10 : COPY virt-launcher /usr/bin/virt-launcher ---> d540eaf8e154 Removing intermediate container c6a5ed305c5c Step 5/10 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> c4af3c6e66f5 Removing intermediate container b7cc525d0439 Step 6/10 : RUN setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64 ---> Running in b7e7999226d6  ---> e571c4fa720a Removing intermediate container b7e7999226d6 Step 7/10 : RUN mkdir -p /usr/share/kubevirt/virt-launcher ---> Running in 6a9410dc8de4  ---> 00c91f4f4ea0 Removing intermediate container 6a9410dc8de4 Step 8/10 : COPY entrypoint.sh libvirtd.sh sock-connector /usr/share/kubevirt/virt-launcher/ ---> 7f8f1e39ef15 Removing intermediate container 4af337d6edc6 Step 9/10 : ENTRYPOINT /usr/share/kubevirt/virt-launcher/entrypoint.sh ---> Running in aa4eb3cb93a5 ---> 5892cb45b8aa Removing intermediate container aa4eb3cb93a5 Step 10/10 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-launcher" '' ---> Running in 13928bda423a ---> f2026aa032c9 Removing intermediate container 13928bda423a Successfully built f2026aa032c9 Sending build context to Docker daemon 41.67 MB Step 1/5 : FROM fedora:28 ---> cc510acfcd70 Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> bfe77d5699ed Step 3/5 : COPY virt-handler /usr/bin/virt-handler ---> 3c56f6d705ed Removing intermediate container 42ce6b851df0 Step 4/5 : ENTRYPOINT /usr/bin/virt-handler ---> Running in e0bc07d048b3 ---> 3725a5ac7f1c Removing intermediate container e0bc07d048b3 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-handler" '' ---> Running in 22522e5885c5 ---> 83401ac02be6 Removing intermediate container 22522e5885c5 Successfully built 83401ac02be6 Sending build context to Docker daemon 38.81 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> bfe77d5699ed Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api ---> Using cache ---> ed1ebf600ee1 Step 4/8 : WORKDIR /home/virt-api ---> Using cache ---> 0769dad023e5 Step 5/8 : USER 1001 ---> Using cache ---> 0cb65afb0c2b Step 6/8 : COPY virt-api /usr/bin/virt-api ---> 27ce9a7aee8d Removing 
intermediate container 9f268ac41eea Step 7/8 : ENTRYPOINT /usr/bin/virt-api ---> Running in 0bd12edddc20 ---> d7661cfd12c5 Removing intermediate container 0bd12edddc20 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-api" '' ---> Running in d2887fb3d2a6 ---> e766c2d9a043 Removing intermediate container d2887fb3d2a6 Successfully built e766c2d9a043 Sending build context to Docker daemon 4.096 kB Step 1/7 : FROM fedora:28 ---> cc510acfcd70 Step 2/7 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> bfe77d5699ed Step 3/7 : ENV container docker ---> Using cache ---> 62847a2a1fa8 Step 4/7 : RUN mkdir -p /images/custom /images/alpine && truncate -s 64M /images/custom/disk.img && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/alpine/disk.img ---> Using cache ---> 02134835a6aa Step 5/7 : ADD entrypoint.sh / ---> Using cache ---> ec0843818da7 Step 6/7 : CMD /entrypoint.sh ---> Using cache ---> 754029bb4bd2 Step 7/7 : LABEL "disks-images-provider" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 6327b8256318 Successfully built 6327b8256318 Sending build context to Docker daemon 2.56 kB Step 1/5 : FROM fedora:28 ---> cc510acfcd70 Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> bfe77d5699ed Step 3/5 : ENV container docker ---> Using cache ---> 62847a2a1fa8 Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> 207487abe7b2 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "vm-killer" '' ---> Using cache ---> 27cf5472530f Successfully built 27cf5472530f Sending build context to Docker daemon 5.12 kB Step 1/7 : FROM debian:sid ---> 68f33cf86aab Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> 5734d749eb5c Step 3/7 : ENV container docker ---> Using cache ---> f8775a77966f Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> 1a40cf222a61 Step 5/7 : ADD entry-point.sh / ---> Using cache ---> 77b545d92fe7 Step 6/7 : CMD /entry-point.sh ---> Using cache ---> dfe20d463305 Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "registry-disk-v1alpha" '' ---> Using cache ---> 5efdf368e732 Successfully built 5efdf368e732 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32965/kubevirt/registry-disk-v1alpha:devel ---> 5efdf368e732 Step 2/4 : MAINTAINER "David Vossel" \ ---> Using cache ---> 386f7e924456 Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Using cache ---> f473a86e4d6a Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> a4ca4c67d45c Successfully built a4ca4c67d45c Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32965/kubevirt/registry-disk-v1alpha:devel ---> 5efdf368e732 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 369bca39dcc2 Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2 ---> Using cache ---> de1e81f43a59 Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> a5867eac6e05 Successfully built a5867eac6e05 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM 
localhost:32965/kubevirt/registry-disk-v1alpha:devel ---> 5efdf368e732 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 369bca39dcc2 Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso ---> Using cache ---> 1083d820f9c8 Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Using cache ---> 11512d828b9c Successfully built 11512d828b9c Sending build context to Docker daemon 35.59 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> bfe77d5699ed Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl ---> Using cache ---> 985fe391c056 Step 4/8 : WORKDIR /home/virtctl ---> Using cache ---> 3b2cae8ac543 Step 5/8 : USER 1001 ---> Using cache ---> 0c06e5b4a900 Step 6/8 : COPY subresource-access-test /subresource-access-test ---> 0f4b34ac4f91 Removing intermediate container c18b77504b89 Step 7/8 : ENTRYPOINT /subresource-access-test ---> Running in 37e8284413a3 ---> 44e30a0c1d9c Removing intermediate container 37e8284413a3 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "subresource-access-test" '' ---> Running in 5898fda3402a ---> da5a40043b9b Removing intermediate container 5898fda3402a Successfully built da5a40043b9b Sending build context to Docker daemon 3.072 kB Step 1/9 : FROM fedora:28 ---> cc510acfcd70 Step 2/9 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> bfe77d5699ed Step 3/9 : ENV container docker ---> Using cache ---> 62847a2a1fa8 Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all ---> Using cache ---> d3456b1644b1 Step 5/9 : ENV GIMME_GO_VERSION 1.9.2 ---> Using cache ---> 0ba81fddbba1 Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> 5d33abe3f819 Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 783826523be1 Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli ---> Using cache ---> 711bc8d15952 Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "winrmcli" '' ---> Using cache ---> fe40426b785b Successfully built fe40426b785b Sending build context to Docker daemon 36.79 MB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> e3238544ad97 Step 3/5 : COPY example-hook-sidecar /example-hook-sidecar ---> ee01eec83771 Removing intermediate container 11ad50e60dd2 Step 4/5 : ENTRYPOINT /example-hook-sidecar ---> Running in 43f91ed8b37f ---> 386cf969494c Removing intermediate container 43f91ed8b37f Step 5/5 : LABEL "example-hook-sidecar" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Running in 5fa10832abc4 ---> 16e65c86b18d Removing intermediate container 5fa10832abc4 Successfully built 16e65c86b18d hack/build-docker.sh push The push refers to a repository [localhost:32965/kubevirt/virt-controller] 93c2e58be8e2: Preparing aa89340cf7a8: Preparing 891e1e4ef82a: Preparing aa89340cf7a8: Pushed 93c2e58be8e2: Pushed 891e1e4ef82a: Pushed devel: digest: sha256:3f224e6a0e5629ee356fcb4b5885d3cb46fac2cee110bd80a61e19753eb69473 size: 949 The push refers to a repository [localhost:32965/kubevirt/virt-launcher] 00d94c200a77: Preparing 3814f00e5f3b: Preparing d0235f4b3e21: Preparing d1de3ab8ec88: Preparing 8233bc1db348: Preparing 633427c64a24: Preparing da38cf808aa5: 
Preparing b83399358a92: Preparing 186d8b3e4fd8: Preparing fa6154170bf5: Preparing 5eefb9960a36: Preparing 891e1e4ef82a: Preparing fa6154170bf5: Waiting 5eefb9960a36: Waiting da38cf808aa5: Waiting 891e1e4ef82a: Waiting 186d8b3e4fd8: Waiting 633427c64a24: Waiting 00d94c200a77: Pushed d1de3ab8ec88: Pushed 3814f00e5f3b: Pushed b83399358a92: Pushed da38cf808aa5: Pushed fa6154170bf5: Pushed 186d8b3e4fd8: Pushed 891e1e4ef82a: Mounted from kubevirt/virt-controller d0235f4b3e21: Pushed 633427c64a24: Pushed 8233bc1db348: Pushed 5eefb9960a36: Pushed devel: digest: sha256:19c1604480d6f8bd895cab9ad5a46ed24473defea69afae9ee7b5e2cb8f137ae size: 2828 The push refers to a repository [localhost:32965/kubevirt/virt-handler] 0d14caafa5b2: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-launcher 0d14caafa5b2: Pushed devel: digest: sha256:b2e42d4856d7b6dad4ae382cbb58f2ecf64647490f44aa4e33fdf0c9511b3136 size: 741 The push refers to a repository [localhost:32965/kubevirt/virt-api] 0247a835615a: Preparing 82fc744c99b4: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-handler 82fc744c99b4: Pushed 0247a835615a: Pushed devel: digest: sha256:0594a2a24a52d187f71e9ce1920783bd2b0a3c4dcd66374a06116ab709262833 size: 948 The push refers to a repository [localhost:32965/kubevirt/disks-images-provider] 71ad31feb2c5: Preparing 21d4b721776e: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-api 71ad31feb2c5: Pushed 21d4b721776e: Pushed devel: digest: sha256:5dc088106df85eb01f2ad0566624239b95b34986820107944e36d309183fd4cd size: 948 The push refers to a repository [localhost:32965/kubevirt/vm-killer] c4cfadeeaf5f: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/disks-images-provider c4cfadeeaf5f: Pushed devel: digest: sha256:39b817b79b1fbce75dbb476bc261b2752fd6466bf98d373208d5144579da22b0 size: 740 The push refers to a repository [localhost:32965/kubevirt/registry-disk-v1alpha] 661cce8d8e52: Preparing 41e0baba3077: Preparing 25edbec0eaea: Preparing 661cce8d8e52: Pushed 41e0baba3077: Pushed 25edbec0eaea: Pushed devel: digest: sha256:0df707a55243af8792380fba68a76307017494c503e0e9071ed55d7d3c3611d4 size: 948 The push refers to a repository [localhost:32965/kubevirt/cirros-registry-disk-demo] f9f97de3966a: Preparing 661cce8d8e52: Preparing 41e0baba3077: Preparing 25edbec0eaea: Preparing 25edbec0eaea: Mounted from kubevirt/registry-disk-v1alpha 41e0baba3077: Mounted from kubevirt/registry-disk-v1alpha 661cce8d8e52: Mounted from kubevirt/registry-disk-v1alpha f9f97de3966a: Pushed devel: digest: sha256:3f818f67105a36bdc42bdbfad87fc29d0028e39a0dceef92d12efbcf8e16e5ed size: 1160 The push refers to a repository [localhost:32965/kubevirt/fedora-cloud-registry-disk-demo] 24cdf3b545f2: Preparing 661cce8d8e52: Preparing 41e0baba3077: Preparing 25edbec0eaea: Preparing 41e0baba3077: Mounted from kubevirt/cirros-registry-disk-demo 25edbec0eaea: Mounted from kubevirt/cirros-registry-disk-demo 661cce8d8e52: Mounted from kubevirt/cirros-registry-disk-demo 24cdf3b545f2: Pushed devel: digest: sha256:a6a571626690141c7da4cf0e1eb4fd75e5dd9ae427d5070c2729214cfbd6a192 size: 1161 The push refers to a repository [localhost:32965/kubevirt/alpine-registry-disk-demo] d8e356e905f4: Preparing 661cce8d8e52: Preparing 41e0baba3077: Preparing 25edbec0eaea: Preparing 41e0baba3077: Mounted from kubevirt/fedora-cloud-registry-disk-demo 661cce8d8e52: Mounted from kubevirt/fedora-cloud-registry-disk-demo 25edbec0eaea: Mounted from kubevirt/fedora-cloud-registry-disk-demo 
d8e356e905f4: Pushed devel: digest: sha256:c27568048aa8e031860d98cdced0370763745ad80581e62432568dac45abf1fb size: 1160 The push refers to a repository [localhost:32965/kubevirt/subresource-access-test] b60d615d0e67: Preparing 25cb73590a9d: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/vm-killer 25cb73590a9d: Pushed b60d615d0e67: Pushed devel: digest: sha256:6d30751f4edaf0dee8d9410928d22cab97314ae63ef5c6d781e57fc247e40a5f size: 948 The push refers to a repository [localhost:32965/kubevirt/winrmcli] f8083e002d0b: Preparing 53c709abc882: Preparing 9ca98a0f492b: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/subresource-access-test f8083e002d0b: Pushed 9ca98a0f492b: Pushed 53c709abc882: Pushed devel: digest: sha256:4fe6c9666a841b61b962d7fb73ccb7cb0dabc3b56e1657cfdfd9005e1a36d38c size: 1165 The push refers to a repository [localhost:32965/kubevirt/example-hook-sidecar] f3a27e1fcfd9: Preparing 39bae602f753: Preparing f3a27e1fcfd9: Pushed 39bae602f753: Pushed devel: digest: sha256:1bc1f4d00b6319d5ad9d8ea4d603534b8572ca8bcc093834590dda7f5863da07 size: 740 make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' Done ./cluster/clean.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-136-g66b58b9 ++ KUBEVIRT_VERSION=v0.7.0-136-g66b58b9 + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip 
network_provider kubeconfig manifest_docker_prefix namespace image_pull_policy ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system +++ image_pull_policy=IfNotPresent ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32965/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace image_pull_policy + echo 'Cleaning up ...' Cleaning up ... + cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers + grep foregroundDeleteVirtualMachine + read p error: the server doesn't have a resource type "vmis" + _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0 No resources found + namespaces=(default ${namespace}) + for i in '${namespaces[@]}' + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete deployment -l kubevirt.io No resources found + _kubectl -n default delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rs -l kubevirt.io No resources found + _kubectl -n default delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete services -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io + export 
KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n default delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete secrets -l kubevirt.io No resources found + _kubectl -n default delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pv -l kubevirt.io No resources found + _kubectl -n default delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pvc -l kubevirt.io No resources found + _kubectl -n default delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete ds -l kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n default delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pods -l kubevirt.io No resources found + _kubectl -n default delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n default delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rolebinding -l kubevirt.io No resources found + _kubectl -n default delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete roles -l kubevirt.io No resources found + _kubectl -n default delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterroles -l kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ wc -l Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + for i in '${namespaces[@]}' + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete deployment -l 
kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete services -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n kube-system delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete secrets -l kubevirt.io No resources found + _kubectl -n kube-system delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pv -l kubevirt.io No resources found + _kubectl -n kube-system delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pvc -l kubevirt.io No resources found + _kubectl -n kube-system delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete ds -l kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n kube-system delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pods -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete roles -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + 
KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterroles -l kubevirt.io No resources found + _kubectl -n kube-system delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ wc -l ++ cluster/os-3.10.0/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + sleep 2 + echo Done Done ./cluster/deploy.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-136-g66b58b9 ++ KUBEVIRT_VERSION=v0.7.0-136-g66b58b9 + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace image_pull_policy ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller 
cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system +++ image_pull_policy=IfNotPresent ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32965/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace image_pull_policy + echo 'Deploying ...' Deploying ... + [[ -z openshift-3.10-release ]] + [[ openshift-3.10-release =~ .*-dev ]] + [[ openshift-3.10-release =~ .*-release ]] + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]] + continue + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]] + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml clusterrole.rbac.authorization.k8s.io "kubevirt.io:admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:edit" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:view" created serviceaccount "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver-auth-delegator" created rolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created role.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-controller" created serviceaccount "kubevirt-controller" created serviceaccount "kubevirt-privileged" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller-cluster-admin" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-privileged-cluster-admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:default" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt.io:default" created service "virt-api" created deployment.extensions "virt-api" created deployment.extensions "virt-controller" created daemonset.extensions "virt-handler" created customresourcedefinition.apiextensions.k8s.io 
"virtualmachineinstances.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancereplicasets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancepresets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachines.kubevirt.io" created + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R persistentvolumeclaim "disk-alpine" created persistentvolume "host-path-disk-alpine" created persistentvolumeclaim "disk-custom" created persistentvolume "host-path-disk-custom" created daemonset.extensions "disks-images-provider" created serviceaccount "kubevirt-testing" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-testing-cluster-admin" created + [[ os-3.10.0 =~ os-* ]] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"] + _kubectl adm policy add-scc-to-user privileged admin + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged admin scc "privileged" added to: ["admin"] + echo Done Done + namespaces=(kube-system default) + [[ kube-system != \k\u\b\e\-\s\y\s\t\e\m ]] + timeout=300 + sample=30 + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n 'disks-images-provider-nq854 0/1 ContainerCreating 0 4s disks-images-provider-pd68g 0/1 ContainerCreating 0 4s virt-api-7d79764579-756zm 0/1 ContainerCreating 0 6s virt-api-7d79764579-st4kr 0/1 ContainerCreating 0 6s virt-controller-7d57d96b65-7cbb4 0/1 ContainerCreating 0 6s virt-controller-7d57d96b65-rmsl2 0/1 ContainerCreating 0 6s virt-handler-vv95q 0/1 ContainerCreating 0 6s 
virt-handler-w85fb 0/1 ContainerCreating 0 6s' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... + kubectl get pods -n kube-system --no-headers + grep -v Running + cluster/kubectl.sh get pods -n kube-system --no-headers disks-images-provider-nq854 0/1 ContainerCreating 0 5s disks-images-provider-pd68g 0/1 ContainerCreating 0 5s virt-api-7d79764579-756zm 0/1 ContainerCreating 0 7s virt-api-7d79764579-st4kr 0/1 ContainerCreating 0 7s virt-controller-7d57d96b65-7cbb4 0/1 ContainerCreating 0 7s virt-controller-7d57d96b65-rmsl2 0/1 ContainerCreating 0 7s virt-handler-vv95q 0/1 ContainerCreating 0 7s virt-handler-w85fb 0/1 ContainerCreating 0 7s + sleep 30 + current_time=30 + '[' 30 -gt 300 ']' ++ kubectl get pods -n kube-system --no-headers ++ grep -v Running ++ cluster/kubectl.sh get pods -n kube-system --no-headers + '[' -n '' ']' + current_time=0 ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false + '[' -n '' ']' + kubectl get pods -n kube-system + cluster/kubectl.sh get pods -n kube-system NAME READY STATUS RESTARTS AGE disks-images-provider-nq854 1/1 Running 0 38s disks-images-provider-pd68g 1/1 Running 0 38s master-api-node01 1/1 Running 1 18d master-controllers-node01 1/1 Running 1 18d master-etcd-node01 1/1 Running 1 18d virt-api-7d79764579-756zm 1/1 Running 1 40s virt-api-7d79764579-st4kr 1/1 Running 0 40s virt-controller-7d57d96b65-7cbb4 1/1 Running 0 40s virt-controller-7d57d96b65-rmsl2 1/1 Running 0 40s virt-handler-vv95q 1/1 Running 0 40s virt-handler-w85fb 1/1 Running 0 40s + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n default --no-headers ++ cluster/kubectl.sh get pods -n default --no-headers ++ grep -v Running + '[' -n '' ']' + current_time=0 ++ kubectl get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false ++ cluster/kubectl.sh get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + '[' -n '' ']' + kubectl get pods -n default + cluster/kubectl.sh get pods -n default NAME READY STATUS RESTARTS AGE docker-registry-1-rl562 1/1 Running 1 18d registry-console-1-rw9zf 1/1 Running 1 18d router-1-6cch9 1/1 Running 1 18d + kubectl version + cluster/kubectl.sh version oc v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 features: Basic-Auth GSSAPI Kerberos SPNEGO Server https://127.0.0.1:32962 openshift v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 + ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + [[ openshift-3.10-release =~ windows.* ]] + FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + make functest hack/dockerized "hack/build-func-tests.sh" Sending build context to Docker daemon 5.632 kB Step 1/12 : FROM fedora:28 ---> cc510acfcd70 Step 2/12 : ENV LIBVIRT_VERSION 4.2.0 ---> Using cache ---> b1088795aeb6 Step 3/12 : RUN curl --output /etc/yum.repos.d/fedora-virt-preview.repo https://fedorapeople.org/groups/virt/virt-preview/fedora-virt-preview.repo ---> Using cache ---> 88f43b954f9f Step 4/12 : RUN dnf -y install libvirt-devel-${LIBVIRT_VERSION} make git mercurial sudo gcc findutils gradle rsync-daemon rsync qemu-img protobuf-compiler && dnf 
-y clean all ---> Using cache ---> 06c70b43758a Step 5/12 : ENV GIMME_GO_VERSION 1.10 ---> Using cache ---> e5b3ae738662 Step 6/12 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> 3e3d43f49e45 Step 7/12 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 73e8a3aa263a Step 8/12 : ADD rsyncd.conf /etc/rsyncd.conf ---> Using cache ---> bc244b1c712b Step 9/12 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/mattn/goveralls && go get -u github.com/Masterminds/glide && go get golang.org/x/tools/cmd/goimports && git clone https://github.com/mvdan/sh.git $GOPATH/src/mvdan.cc/sh && cd /go/src/mvdan.cc/sh/cmd/shfmt && git checkout v2.5.0 && go get mvdan.cc/sh/cmd/shfmt && go install && go get -u github.com/golang/mock/gomock && go get -u github.com/rmohr/mock/mockgen && go get -u github.com/rmohr/go-swagger-utils/swagger-doc && go get -u github.com/onsi/ginkgo/ginkgo && go get -u -d k8s.io/code-generator/cmd/deepcopy-gen && go get -u -d k8s.io/code-generator/cmd/defaulter-gen && go get -u -d k8s.io/code-generator/cmd/openapi-gen && cd /go/src/k8s.io/code-generator/cmd/deepcopy-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/defaulter-gen && git checkout release-1.9 && go install && cd /go/src/k8s.io/code-generator/cmd/openapi-gen && git checkout release-1.9 && go install && go get -u -d github.com/golang/protobuf/protoc-gen-go && cd /go/src/github.com/golang/protobuf/protoc-gen-go && git checkout 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 && go install ---> Using cache ---> 4cd1786b2bc8 Step 10/12 : RUN pip install j2cli ---> Using cache ---> b51a532fa53a Step 11/12 : ADD entrypoint.sh /entrypoint.sh ---> Using cache ---> 3bc0185264f6 Step 12/12 : ENTRYPOINT /entrypoint.sh ---> Using cache ---> dcf2b21fa2ed Successfully built dcf2b21fa2ed go version go1.10 linux/amd64 go version go1.10 linux/amd64 Compiling tests... 
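[editor's note, not part of the captured output] The image build and test compilation above follow the standard hack/dockerized path; the suite itself is then driven by hack/functests.sh. As a minimal sketch only, assuming the os-3.10.0 provider cluster and the manifests applied by ./cluster/deploy.sh above are already in place, the same run could be reproduced locally with just the ginkgo flag, since the --junit-output path used here is specific to this CI workspace:
# Sketch: re-run the compiled functional tests the way this job does.
# Assumes the cluster deployed earlier in this log is still up.
export KUBEVIRT_PROVIDER=os-3.10.0
export KUBEVIRT_NUM_NODES=2
# The job additionally passes --junit-output=<workspace>/junit.xml; that path is CI-specific.
FUNC_TEST_ARGS='--ginkgo.noColor' make functest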
compiled tests.test hack/functests.sh Running Suite: Tests Suite ========================== Random Seed: 1532632198 Will run 148 of 148 specs • [SLOW TEST:20.287 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:21.453 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given an vm /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:19.743 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi preset /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:19.486 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi replica set /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:72.273 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting and stopping the same VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:90 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:91 should success multiple times /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:92 ------------------------------ • [SLOW TEST:18.527 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:111 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:112 should not modify the spec on status update /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:113 ------------------------------ • [SLOW TEST:25.902 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting multiple VMIs /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:129 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:130 should success /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:131 ------------------------------ • [SLOW TEST:21.154 seconds] VNC /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:46 A new VirtualMachineInstance 
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:54 with VNC connection /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:62 should allow accessing the VNC device /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:64 ------------------------------ ••• ------------------------------ • [SLOW TEST:15.878 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should scale /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 to five, to six and then to zero replicas /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:5.158 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should be rejected on POST if spec is invalid /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:107 ------------------------------ • ------------------------------ • [SLOW TEST:21.767 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should update readyReplicas once VMIs are up /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:157 ------------------------------ • [SLOW TEST:7.493 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should remove VMIs once it is marked for deletion /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:169 ------------------------------ • ------------------------------ • [SLOW TEST:5.698 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should not scale when paused and scale when resume /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:223 ------------------------------ • [SLOW TEST:13.835 seconds] VirtualMachineInstanceReplicaSet /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46 should remove the finished VM /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:279 ------------------------------ 2018/07/26 15:15:37 read closing down: EOF Service cluster-ip-vmi successfully exposed for virtualmachineinstance testvmirb6dn • [SLOW TEST:47.084 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:68 Should expose a Cluster IP service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:71 ------------------------------ Service cluster-ip-target-vmi successfully exposed for virtualmachineinstance testvmirb6dn •Service node-port-vmi successfully exposed for virtualmachineinstance testvmirb6dn ------------------------------ • [SLOW TEST:9.505 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61 Expose NodePort service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:124 Should expose a NodePort service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:129 ------------------------------ 2018/07/26 15:16:40 read closing down: EOF Service cluster-ip-udp-vmi successfully exposed for virtualmachineinstance testvmicrvcs • [SLOW TEST:51.990 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose UDP service on a VMI /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:166 Expose ClusterIP UDP 
service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:173 Should expose a ClusterIP service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:177 ------------------------------ Service node-port-udp-vmi successfully exposed for virtualmachineinstance testvmicrvcs • [SLOW TEST:8.732 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose UDP service on a VMI /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:166 Expose NodePort UDP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:205 Should expose a NodePort service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:210 ------------------------------ 2018/07/26 15:22:32 read closing down: EOF Pod name: disks-images-provider-nq854 Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pd68g Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-756zm Pod phase: Running level=info timestamp=2018-07-26T19:21:54.067461Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 19:22:00 http: TLS handshake error from 10.129.0.1:35174: EOF level=info timestamp=2018-07-26T19:22:04.309660Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 19:22:10 http: TLS handshake error from 10.129.0.1:35186: EOF level=info timestamp=2018-07-26T19:22:13.612816Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-26T19:22:14.543009Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:22:15.606476Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:22:15.754343Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:22:15.906015Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:22:16.899143Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:22:16.915603Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:22:16.938318Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 19:22:20 http: TLS handshake error from 
10.129.0.1:35200: EOF level=info timestamp=2018-07-26T19:22:24.880693Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 19:22:30 http: TLS handshake error from 10.129.0.1:35212: EOF Pod name: virt-api-7d79764579-st4kr Pod phase: Running 2018/07/26 19:20:21 http: TLS handshake error from 10.129.0.1:59400: EOF 2018/07/26 19:20:31 http: TLS handshake error from 10.129.0.1:59412: EOF 2018/07/26 19:20:41 http: TLS handshake error from 10.129.0.1:59424: EOF 2018/07/26 19:20:51 http: TLS handshake error from 10.129.0.1:59436: EOF 2018/07/26 19:21:01 http: TLS handshake error from 10.129.0.1:59448: EOF 2018/07/26 19:21:11 http: TLS handshake error from 10.129.0.1:59460: EOF 2018/07/26 19:21:21 http: TLS handshake error from 10.129.0.1:59472: EOF 2018/07/26 19:21:31 http: TLS handshake error from 10.129.0.1:59484: EOF 2018/07/26 19:21:41 http: TLS handshake error from 10.129.0.1:59496: EOF level=info timestamp=2018-07-26T19:21:43.708077Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 19:21:51 http: TLS handshake error from 10.129.0.1:59508: EOF 2018/07/26 19:22:01 http: TLS handshake error from 10.129.0.1:59520: EOF 2018/07/26 19:22:11 http: TLS handshake error from 10.129.0.1:59532: EOF 2018/07/26 19:22:21 http: TLS handshake error from 10.129.0.1:59546: EOF 2018/07/26 19:22:31 http: TLS handshake error from 10.129.0.1:59558: EOF Pod name: virt-controller-7d57d96b65-7cbb4 Pod phase: Running level=info timestamp=2018-07-26T19:09:04.096713Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-rmsl2 Pod phase: Running level=info timestamp=2018-07-26T19:14:56.821550Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirb6dn\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirb6dn" level=info timestamp=2018-07-26T19:15:53.167731Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmicrvcs kind= uid=5012a507-9108-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:15:53.170315Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmicrvcs kind= uid=5012a507-9108-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:15:53.532331Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmicrvcs\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmicrvcs" level=info timestamp=2018-07-26T19:16:54.049124Z pos=vm.go:459 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Looking for VirtualMachineInstance Ref" level=error timestamp=2018-07-26T19:16:54.052936Z pos=vm.go:462 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= 
uid=7454acee-9108-11e8-858f-525500d15501 msg="Cant find the matching VM for VirtualMachineInstance: testvmijcm25vqjr8" level=info timestamp=2018-07-26T19:16:54.050669Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:16:54.053587Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:16:54.056372Z pos=vm.go:459 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Looking for VirtualMachineInstance Ref" level=error timestamp=2018-07-26T19:16:54.056531Z pos=vm.go:462 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Cant find the matching VM for VirtualMachineInstance: testvmijcm254rvs4" level=info timestamp=2018-07-26T19:16:54.056713Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:16:54.057028Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:16:54.237430Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmijcm25vqjr8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmijcm25vqjr8" level=info timestamp=2018-07-26T19:16:54.244870Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmijcm254rvs4\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmijcm254rvs4" level=info timestamp=2018-07-26T19:16:54.549236Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmijcm254rvs4\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmijcm254rvs4" Pod name: virt-handler-vv95q Pod phase: Running level=info timestamp=2018-07-26T19:15:12.744345Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:15:12.886410Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmirb6dn kind= uid=2da89824-9108-11e8-858f-525500d15501 msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-26T19:15:12.886563Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmirb6dn kind= uid=2da89824-9108-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:15:12.896901Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmirb6dn kind= uid=2da89824-9108-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:17:10.625329Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:17:11.535036Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type ADDED" level=info timestamp=2018-07-26T19:17:11.538611Z pos=vm.go:657 component=virt-handler namespace=kubevirt-test-default name=testvmijcm254rvs4 kind=Domain uid=745536ee-9108-11e8-858f-525500d15501 msg="Domain is in state Paused reason StartingUp" level=info timestamp=2018-07-26T19:17:11.841646Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:17:11.846795Z pos=vm.go:688 component=virt-handler namespace=kubevirt-test-default name=testvmijcm254rvs4 kind=Domain uid=745536ee-9108-11e8-858f-525500d15501 msg="Domain is in state Running reason Unknown" level=info timestamp=2018-07-26T19:17:11.875735Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:17:11.897258Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:17:11.901671Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T19:17:12.170053Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:17:12.170362Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:17:12.177127Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Synchronization loop succeeded." 
Pod name: virt-handler-w85fb Pod phase: Running level=info timestamp=2018-07-26T19:17:13.070225Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type ADDED" level=info timestamp=2018-07-26T19:17:13.070713Z pos=vm.go:657 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind=Domain uid=7454acee-9108-11e8-858f-525500d15501 msg="Domain is in state Paused reason StartingUp" level=info timestamp=2018-07-26T19:17:13.379897Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:17:13.399287Z pos=vm.go:688 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind=Domain uid=7454acee-9108-11e8-858f-525500d15501 msg="Domain is in state Running reason Unknown" level=info timestamp=2018-07-26T19:17:13.570828Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:17:13.571411Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T19:17:13.618135Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:17:13.739669Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:17:13.740184Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="No update processing required" level=error timestamp=2018-07-26T19:17:13.760206Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmijcm25vqjr8\": the object has been modified; please apply your changes to the latest version and try again" msg="Updating the VirtualMachineInstance status failed." level=info timestamp=2018-07-26T19:17:13.760715Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmijcm25vqjr8\": the object has been modified; please apply your changes to the latest version and try again" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmijcm25vqjr8" level=info timestamp=2018-07-26T19:17:13.761292Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:17:13.767207Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:17:13.767979Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:17:13.772297Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Synchronization loop succeeded." 
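[editor's note, not part of the captured output] The per-pod output above (virt-api, virt-controller, virt-handler) and below (the netcat and virt-launcher test pods) is the suite's failure diagnostic for the Expose spec reported further down. A rough manual equivalent, offered only as a sketch using the same kubectl wrapper this job uses (namespaces taken from this log; the harness' own collector may differ):
# Sketch: dump recent logs from every pod in the namespaces seen in this run.
for ns in kube-system kubevirt-test-default; do
  for pod in $(cluster/kubectl.sh get pods -n "$ns" -o name); do
    echo "Pod: $pod (namespace: $ns)"
    cluster/kubectl.sh logs -n "$ns" "$pod" --tail=20
  done
done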
Pod name: netcatg5gvl Pod phase: Succeeded ++ head -n 1 +++ nc 192.168.66.102 30017 -i 1 -w 1 + x='Hello World!' + echo 'Hello World!' Hello World! + '[' 'Hello World!' = 'Hello World!' ']' + echo succeeded succeeded + exit 0 Pod name: netcatj2xgk Pod phase: Succeeded ++ head -n 1 +++ nc 172.30.42.188 27017 -i 1 -w 1 Hello World! succeeded + x='Hello World!' + echo 'Hello World!' + '[' 'Hello World!' = 'Hello World!' ']' + echo succeeded + exit 0 Pod name: netcatk2fwl Pod phase: Succeeded ++ head -n 1 +++ nc -ul 31016 +++ echo +++ nc -up 31016 192.168.66.101 31017 -i 1 -w 1 Hello UDP World! succeeded + x='Hello UDP World!' + echo 'Hello UDP World!' + '[' 'Hello UDP World!' = 'Hello UDP World!' ']' + echo succeeded + exit 0 Pod name: netcatqh99s Pod phase: Succeeded ++ head -n 1 +++ nc -ul 28016 +++ nc -up 28016 172.30.37.198 28017 -i 1 -w 1 +++ echo + x='Hello UDP World!' + echo 'Hello UDP World!' + '[' 'Hello UDP World!' = 'Hello UDP World!' ']' + echo succeeded + exit 0 Hello UDP World! succeeded Pod name: netcatrqcbm Pod phase: Succeeded ++ head -n 1 +++ nc -ul 31016 +++ nc -up 31016 192.168.66.102 31017 -i 1 -w 1 +++ echo Hello UDP World! succeeded + x='Hello UDP World!' + echo 'Hello UDP World!' + '[' 'Hello UDP World!' = 'Hello UDP World!' ']' + echo succeeded + exit 0 Pod name: netcatsxddm Pod phase: Succeeded ++ head -n 1 +++ nc -ul 29016 +++ echo +++ nc -up 29016 172.30.56.224 29017 -i 1 -w 1 Hello UDP World! succeeded + x='Hello UDP World!' + echo 'Hello UDP World!' + '[' 'Hello UDP World!' = 'Hello UDP World!' ']' + echo succeeded + exit 0 Pod name: netcatxghlg Pod phase: Succeeded ++ head -n 1 +++ nc 192.168.66.101 30017 -i 1 -w 1 Hello World! succeeded + x='Hello World!' + echo 'Hello World!' + '[' 'Hello World!' = 'Hello World!' ']' + echo succeeded + exit 0 Pod name: virt-launcher-testvmi285vs5dblw-5bfxn Pod phase: Running level=info timestamp=2018-07-26T19:15:03.514917Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:15:03.515299Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 7646e498-054b-4e9b-8973-49e8fd937b00" level=info timestamp=2018-07-26T19:15:03.515538Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:15:03.597032Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:15:03.787163Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:15:03.818397Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:15:03.827844Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi285vs5dblw kind= uid=2727122b-9108-11e8-858f-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-26T19:15:03.832422Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi285vs5dblw kind= uid=2727122b-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:15:04.413830Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:15:04.414303Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-26T19:15:04.432943Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:15:04.532338Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 7646e498-054b-4e9b-8973-49e8fd937b00: 183" level=info timestamp=2018-07-26T19:15:06.220495Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:15:06.335840Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi285vs5dblw kind= uid=2727122b-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:15:06.341617Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi285vs5dblw kind= uid=2727122b-9108-11e8-858f-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmi285vsntjv4-ksnnr Pod phase: Running level=info timestamp=2018-07-26T19:14:59.426136Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-26T19:15:00.117234Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:15:00.140486Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 58e3be23-95d4-47d6-9e84-21be3af26dbd" level=info timestamp=2018-07-26T19:15:00.143567Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:15:00.161387Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:15:00.271673Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:15:00.317376Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi285vsntjv4 kind= uid=25b0f0ec-9108-11e8-858f-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-26T19:15:00.317773Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:15:00.319163Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi285vsntjv4 kind= uid=25b0f0ec-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:15:00.391006Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:15:00.391165Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-26T19:15:00.413912Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:15:00.429310Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:15:00.923892Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi285vsntjv4 kind= uid=25b0f0ec-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:15:01.163081Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 58e3be23-95d4-47d6-9e84-21be3af26dbd: 177" Pod name: virt-launcher-testvmicrvcs-vb7jt Pod phase: Running level=info timestamp=2018-07-26T19:16:10.527609Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:16:10.557649Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 66c4ba51-4bef-4abb-96b2-9fc7ed3c18d6" level=info timestamp=2018-07-26T19:16:10.560173Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:16:10.564698Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:16:11.096406Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:16:11.123767Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:16:11.133920Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmicrvcs kind= uid=5012a507-9108-11e8-858f-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-26T19:16:11.135766Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmicrvcs kind= uid=5012a507-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:16:11.470984Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:16:11.471229Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-26T19:16:11.497744Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:16:11.499354Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmicrvcs kind= uid=5012a507-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:16:11.509434Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:16:11.525522Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmicrvcs kind= uid=5012a507-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:16:11.581374Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 66c4ba51-4bef-4abb-96b2-9fc7ed3c18d6: 186" Pod name: virt-launcher-testvmijcm254rvs4-dnnmv Pod phase: Running level=info timestamp=2018-07-26T19:17:10.888260Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-26T19:17:11.490751Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:17:11.539418Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:17:11.660542Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 9aab26a2-65cd-41f4-af73-e00fa98e402b" level=info timestamp=2018-07-26T19:17:11.660855Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:17:11.723440Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:17:11.755570Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:17:11.803775Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-26T19:17:11.814457Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:17:11.846961Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:17:11.847109Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-26T19:17:11.868816Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:17:11.880096Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:17:12.176765Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmijcm254rvs4 kind= uid=745536ee-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:17:12.664953Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 9aab26a2-65cd-41f4-af73-e00fa98e402b: 189" Pod name: virt-launcher-testvmijcm25vqjr8-4h8dz Pod phase: Running level=info timestamp=2018-07-26T19:17:13.046783Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:17:13.071167Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:17:13.210621Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 1b024ef3-8c24-4b55-b56b-064f2acfd7d4" level=info timestamp=2018-07-26T19:17:13.211620Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:17:13.322408Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:17:13.356918Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-26T19:17:13.367746Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:17:13.374375Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:17:13.400371Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:17:13.400542Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-26T19:17:13.416742Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:17:13.618661Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:17:13.766349Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:17:13.771770Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmijcm25vqjr8 kind= uid=7454acee-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:17:14.215167Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 1b024ef3-8c24-4b55-b56b-064f2acfd7d4: 187" Pod name: virt-launcher-testvmirb6dn-v254t Pod phase: Running level=info timestamp=2018-07-26T19:15:11.243042Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-26T19:15:11.881537Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:15:11.891971Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:15:12.459540Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 8205663c-954c-4b51-9683-acb9004987eb" level=info timestamp=2018-07-26T19:15:12.465052Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:15:12.667646Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:15:12.700959Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:15:12.706933Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmirb6dn kind= uid=2da89824-9108-11e8-858f-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-26T19:15:12.709663Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmirb6dn kind= uid=2da89824-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:15:12.718838Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:15:12.718997Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-26T19:15:12.731683Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:15:12.745109Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:15:12.890362Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmirb6dn kind= uid=2da89824-9108-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:15:13.470113Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 8205663c-954c-4b51-9683-acb9004987eb: 183" • Failure in Spec Setup (BeforeEach) [341.121 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VMI replica set /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:253 Expose ClusterIP service [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:286 Should create a ClusterIP service on VMRS and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:290 Expected error: : 180000000000 expect: timer expired after 180 seconds not to have occurred /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:36 ------------------------------ STEP: Creating a VMRS object with 2 replicas STEP: Start the replica set STEP: Checking the number of ready replicas STEP: add an 'hello world' server on each VMI in the replica set level=info timestamp=2018-07-26T19:22:32.893405Z pos=utils.go:1237 component=tests namespace=kubevirt-test-default name=testvmi285vs5dblw kind=VirtualMachineInstance uid=2727122b-9108-11e8-858f-525500d15501 msg="Login: [{2 \r\n\r\nfailed 10/20: up 116.50. request failed\r\nfailed 11/20: up 128.58. request failed\r\nfailed 12/20: up 140.63. request failed\r\nfailed 13/20: up 153.03. request failed\r\nfailed 14/20: up 165.11. request failed\r\nfailed 15/20: up 177.18. request failed\r\nfailed 16/20: up 189.25. request failed\r\nfailed 17/20: up 201.32. request failed\r\nfailed 18/20: up 213.39. request failed\r\nfailed 19/20: up 225.44. request failed\r\nfailed 20/20: up 237.53. request failed\r\nfailed to read iid from metadata. tried 20\r\nfailed to get instance-id of datasource\r\nTop of dropbear init script\r\nStarting dropbear sshd: failed to get instance-id of datasource\r\nOK\r\nGROWROOT: NOCHANGE: partition 1 is size 71647. 
it cannot be grown\r\n/dev/root resized successfully [took 0.03s]\r\n=== system information ===\r\nPlatform: QEMU Standard PC (Q35 + ICH9, 2009)\r\nContainer: none\r\nArch: x86_64\r\nCPU(s): 1 @ 2099.998 MHz\r\nCores/Sockets/Threads: 1/1/1\r\nVirt-type: \r\nRAM Size: 44MB\r\nDisks:\r\nNAME MAJ:MIN SIZE LABEL MOUNTPOINT\r\nvda 253:0 46137344 \r\nvda1 253:1 36683264 cirros-rootfs /\r\nvda15 253:15 8388608 \r\n=== sshd host keys ===\r\n-----BEGIN SSH HOST KEY KEYS-----\r\nssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCRhJR9wt5APo+mC5ZznCf03reNHZPABnIUQBG0s6NhZVSVsc8jNUhNQ5fGNzEGZaws94jbhU0wDgffWQy9DnBuLyF0FBTTbxn0DfesNrBdRF7gTTKNjLyCuRIW/DAo0ePQXWnVuawMB/VpEyKxTOQliuLaEZCVhq4mp9dpOQ7ablaYcfY4JFVuINflOsVQp24nWVfxw8BYlZAywu5nCIVNCUuhzuQcIUN2+XyO8lFvJMF2nwhW3W05juAPoUR0IBiDvmrXgc7o9ViTFdoDfm0BadQe26MvmIju2cL5SXyeEepA5L5HpgEM8nh5cBKSjdkTv8AHL6nSVhShY4uGqMW/ root@cirros\r\nssh-dss AAAAB3NzaC1kc3MAAACBAJHsD2112L2j8l7m6ahZpVsL3u5QRjlo+x9+H0ccAJbmCLMJ1vbFCn022ZOEbvT+p9h1PCLMLMEpKXKpp+szgVpTFxFb/bgz2p3bf/i0N9jXr7Ez272mInyunGwWHzMVmzC0mRv1G44AEE2RQ+Ehq7FyDm1hZV9PMoc3RutoSQZFAAAAFQCziHSOeLJsb82UGJWOcxhcCzDjqQAAAIBoG9xuAKQLZqpFSK7MeLS9H7FE1JYCpIxgfp+SrrkO3Is5otFZeIl06mU7yKDDDgYYx6LsBziszOlKJ2g37o2rRZJ+4BfWOTj937JRgCsRZBe93pn+6rzhuBN8/A/xRHpxjGREFk1gus0GXOgygcrO15D7F9dBbpP2wyt06V7qpAAAAIBcVymeU30QzkAe7WQcxMWHxM8ZnVtRS4LNqGmK6b+YcJmYkxuHuK2mpCJHxn3OZc3sIXAuq/hn5nVle5MdUf1Km9zllIuTAUg1VUIRjLfmV4/YAFZqm3oyNlu5Tff28IqPSUcC8yw4OeYYJC0N79Zef/AgZnq3Po86oEM7kumYaQ== root@cirros\r\n-----END SSH HOST KEY KEYS-----\r\n=== network info ===\r\nif-info: lo,up,127.0.0.1,8,,\r\nif-info: eth0,up,10.129.0.28,23,fe80::858:aff:fe81:1c/64,\r\nip-route:default via 10.129.0.1 dev eth0 \r\nip-route:10.128.0.0/14 dev eth0 \r\nip-route:10.129.0.0/23 dev eth0 src 10.129.0.28 \r\nip-route:224.0.0.0/4 dev eth0 \r\nip-route6:fe80::/64 dev eth0 metric 256 \r\nip-route6:unreachable default dev lo metric -1 error -101\r\nip-route6:ff00::/8 dev eth0 metric 256 \r\nip-route6:unreachable default dev lo metric -1 error -101\r\n=== datasource: None None ===\r\n=== cirros: current=0.4.0 uptime=252.63 ===\r\n ____ ____ ____\r\n / __/ __ ____ ____ / __ \\/ __/\r\n/ /__ / // __// __// /_/ /\\ \\ \r\n\\___//_//_/ /_/ \\____/___/ \r\n http://cirros-cloud.net\r\n\r\n\r\r\nlogin as 'cirros' user. default password: 'gocubsgo'. use 'sudo' for root. [login as 'cirros' user. default password: 'gocubsgo'. use 'sudo' for root.]} {4 \r\n\rcirros login: \r\r\nlogin as 'cirros' user. default password: 'gocubsgo'. 
use 'sudo' for root.\r\n\rcirros login: []}]" Service cluster-ip-vm successfully exposed for virtualmachine testvmirbglg VM testvmirbglg was scheduled to start 2018/07/26 15:23:19 read closing down: EOF • [SLOW TEST:49.162 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on an VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:318 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:362 Connect to ClusterIP services that was set when VM was offline /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:363 ------------------------------ •• ------------------------------ • [SLOW TEST:19.557 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should update VirtualMachine once VMIs are up /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:195 ------------------------------ •• ------------------------------ • [SLOW TEST:38.011 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if it gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:245 ------------------------------ • [SLOW TEST:48.084 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:265 ------------------------------ • [SLOW TEST:32.086 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should stop VirtualMachineInstance if running set to false /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:325 ------------------------------ • [SLOW TEST:139.390 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should start and stop VirtualMachineInstance multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:333 ------------------------------ • [SLOW TEST:49.182 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should not update the VirtualMachineInstance spec if Running /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:346 ------------------------------ • [SLOW TEST:173.894 seconds] VirtualMachine 2018/07/26 15:31:56 read closing down: EOF /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 2018/07/26 15:31:56 read closing down: EOF A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should survive guest shutdown, multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:387 ------------------------------ 2018/07/26 15:31:56 read closing down: EOF VM testvmitl9p9 was scheduled to start • [SLOW TEST:18.641 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should start a VirtualMachineInstance once /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:436 ------------------------------ VM testvmiblh5t 
was scheduled to stop • [SLOW TEST:27.864 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should stop a VirtualMachineInstance once /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:467 ------------------------------ • [SLOW TEST:39.815 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userDataBase64 source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81 should have cloud-init data /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:82 ------------------------------ 2018/07/26 15:33:22 read closing down: EOF • [SLOW TEST:102.306 seconds] 2018/07/26 15:35:05 read closing down: EOF CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userDataBase64 source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81 with injected ssh-key /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:92 should have ssh-key under authorized keys /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:93 ------------------------------ 2018/07/26 15:35:47 read closing down: EOF • [SLOW TEST:52.516 seconds] 2018/07/26 15:35:57 read closing down: EOF CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userData source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:118 should process provided cloud-init data /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:119 ------------------------------ 2018/07/26 15:36:41 read closing down: EOF • [SLOW TEST:43.617 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 should take user-data from k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:162 ------------------------------ 2018/07/26 15:37:17 read closing down: EOF • [SLOW TEST:35.955 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ 2018/07/26 15:37:50 read closing down: EOF • [SLOW TEST:33.450 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ 2018/07/26 15:39:41 
read closing down: EOF • [SLOW TEST:122.497 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ 2018/07/26 15:41:55 read closing down: EOF • [SLOW TEST:130.635 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:43.048 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 2018/07/26 15:42:46 read closing down: EOF With an emptyDisk defined /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:113 should create a writeable emptyDisk with the right capacity /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:115 ------------------------------ • [SLOW TEST:41.615 seconds] 2018/07/26 15:43:28 read closing down: EOF Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With an emptyDisk defined and a specified serial number /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:163 should create a writeable emptyDisk with the specified serial number /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:165 ------------------------------ • [SLOW TEST:32.820 seconds] 2018/07/26 15:44:01 read closing down: EOF Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should be successfully started /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:207 ------------------------------ 2018/07/26 15:45:13 read closing down: EOF 2018/07/26 15:45:13 read closing down: EOF • [SLOW TEST:72.486 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should not persist data /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:218 ------------------------------ 2018/07/26 15:47:00 read closing down: EOF • [SLOW TEST:106.650 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With VirtualMachineInstance with two PVCs /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:266 should start vmi multiple times /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:278 ------------------------------ •• 2018/07/26 
15:47:46 read closing down: EOF ------------------------------ • [SLOW TEST:45.714 seconds] Health Monitoring /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:37 A VirtualMachineInstance with a watchdog device /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:56 should be shut down when the watchdog expires /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:57 ------------------------------ volumedisk0 compute 2018/07/26 15:48:27 read closing down: EOF • [SLOW TEST:40.750 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with 3 CPU cores /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:56 should report 3 cpu cores under guest OS /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:62 ------------------------------ • [SLOW TEST:19.666 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108 should consume hugepages /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 hugepages-2Mi /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ S [SKIPPING] [0.238 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108 should consume hugepages /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 hugepages-1Gi [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 No node with hugepages hugepages-1Gi capacity /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:160 ------------------------------ •2018/07/26 15:50:22 read closing down: EOF ------------------------------ • [SLOW TEST:93.463 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 with CPU spec /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238 when CPU model defined /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:284 should report defined CPU model /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:285 ------------------------------ 2018/07/26 15:52:02 read closing down: EOF • [SLOW TEST:99.988 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 with CPU spec /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238 when CPU model equals to passthrough /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:312 should report exactly the same model as node CPU /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:313 ------------------------------ • [SLOW TEST:93.020 seconds] 2018/07/26 15:53:35 read closing down: EOF Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 with CPU spec /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238 when CPU model not defined /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:336 should report CPU model from libvirt 
capabilities /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:337 ------------------------------ • [SLOW TEST:42.845 seconds] Configurations 2018/07/26 15:54:17 read closing down: EOF /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44 New VirtualMachineInstance with all supported drives /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:357 should have all the device nodes /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:380 ------------------------------ ••••••••••• ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.031 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to start a vmi [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:133 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.023 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to stop a running vmi [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:139 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.012 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150 should have correct UUID /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:192 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.010 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150 should have pod IP /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:208 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.033 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226 should succeed to start a vmi /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:242 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.023 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226 should succeed to stop a vmi /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:250 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352 ------------------------------ 2018/07/26 15:55:11 read closing down: EOF 2018/07/26 15:55:55 read closing down: EOF • [SLOW TEST:95.188 seconds] 2018/07/26 15:55:59 read closing down: EOF Slirp /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:39 should be able to /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 
VirtualMachineInstance with slirp interface /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ 2018/07/26 15:56:02 read closing down: EOF •••• ------------------------------ • [SLOW TEST:6.136 seconds] Templates /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:42 Launching VMI from VM Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:60 with given Fedora Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:193 with given VM JSON from the Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:152 with given VM from the VM JSON /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:158 with given VMI from the VM /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:163 should succeed to terminate the VMI using oc-patch command /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:166 ------------------------------ Pod name: disks-images-provider-nq854 Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pd68g Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-756zm Pod phase: Running 2018/07/26 19:56:20 http: TLS handshake error from 10.129.0.1:37744: EOF level=info timestamp=2018-07-26T19:56:23.763437Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:56:25.419726Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:56:26.341037Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:56:26.600215Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=10s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 19:56:30 http: TLS handshake error from 10.129.0.1:37756: EOF level=info timestamp=2018-07-26T19:56:34.231041Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:56:37.034690Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 19:56:41 http: TLS handshake error from 10.129.0.1:37766: EOF level=info timestamp=2018-07-26T19:56:44.048743Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-26T19:56:44.143351Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-26T19:56:44.623501Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:56:45.541402Z pos=filter.go:46 
component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T19:56:45.590018Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 19:56:51 http: TLS handshake error from 10.129.0.1:37780: EOF Pod name: virt-api-7d79764579-st4kr Pod phase: Running 2018/07/26 19:55:11 http: TLS handshake error from 10.129.0.1:33752: EOF level=info timestamp=2018-07-26T19:55:14.564647Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 19:55:21 http: TLS handshake error from 10.129.0.1:33764: EOF 2018/07/26 19:55:31 http: TLS handshake error from 10.129.0.1:33778: EOF 2018/07/26 19:55:41 http: TLS handshake error from 10.129.0.1:33792: EOF level=info timestamp=2018-07-26T19:55:44.536567Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 19:55:51 http: TLS handshake error from 10.129.0.1:33804: EOF 2018/07/26 19:56:01 http: TLS handshake error from 10.129.0.1:33834: EOF 2018/07/26 19:56:11 http: TLS handshake error from 10.129.0.1:33846: EOF level=info timestamp=2018-07-26T19:56:14.355744Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 19:56:21 http: TLS handshake error from 10.129.0.1:33858: EOF 2018/07/26 19:56:32 http: TLS handshake error from 10.129.0.1:33870: EOF 2018/07/26 19:56:41 http: TLS handshake error from 10.129.0.1:33882: EOF 2018/07/26 19:56:51 http: TLS handshake error from 10.129.0.1:33894: EOF level=info timestamp=2018-07-26T19:56:52.147280Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 Pod name: virt-controller-7d57d96b65-7cbb4 Pod phase: Running level=info timestamp=2018-07-26T19:09:04.096713Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-rmsl2 Pod phase: Running level=info timestamp=2018-07-26T19:56:16.387423Z pos=vm.go:186 component=virt-controller service=http namespace=default name=testvm kind= uid=f2c28e3a-910d-11e8-858f-525500d15501 msg="Creating or the VirtualMachineInstance: false" level=info timestamp=2018-07-26T19:56:16.387524Z pos=vm.go:262 component=virt-controller service=http msg="vmi is nil" level=info timestamp=2018-07-26T19:56:16.424529Z pos=vm.go:135 component=virt-controller service=http namespace=default name=testvm kind= uid=f2c28e3a-910d-11e8-858f-525500d15501 msg="Started processing VM" level=info timestamp=2018-07-26T19:56:16.424727Z pos=vm.go:186 component=virt-controller service=http namespace=default name=testvm kind= uid=f2c28e3a-910d-11e8-858f-525500d15501 msg="Creating or the VirtualMachineInstance: false" level=info timestamp=2018-07-26T19:56:16.424835Z pos=vm.go:262 component=virt-controller service=http msg="vmi is nil" level=info timestamp=2018-07-26T19:56:18.449558Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info 
timestamp=2018-07-26T19:56:18.450195Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:18.475751Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmipb77d kind= uid=f5af5278-910d-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:56:18.476015Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmipb77d kind= uid=f5af5278-910d-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:18.552539Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5n9s5 kind= uid=f5b4a7b5-910d-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:56:18.552813Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5n9s5 kind= uid=f5b4a7b5-910d-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:18.641666Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:56:18.641908Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:18.840833Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi44czt\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi44czt" level=info timestamp=2018-07-26T19:56:18.941159Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmipb77d\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmipb77d" Pod name: virt-handler-vv95q Pod phase: Running level=info timestamp=2018-07-26T19:53:34.823575Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmigxdrp kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-26T19:53:34.825334Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmigxdrp kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:53:34.827114Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmigxdrp kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-26T19:53:34.827974Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmigxdrp kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." 
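The virt-controller entries above show the VM "testvm" in the default namespace being reconciled with no VMI behind it ("Creating or the VirtualMachineInstance: false", "vmi is nil"), i.e. its spec.running is false. A minimal spot check from the CLI might look like the following sketch; the resource names and jsonpath are assumptions about the kubevirt.io/v1alpha2 CRDs used by this run, not commands taken from it.
# Hypothetical check (not part of this run): confirm the VM is not set to run
# and that no VirtualMachineInstance exists for it.
kubectl get virtualmachine testvm -n default -o jsonpath='{.spec.running}'; echo
kubectl get virtualmachineinstances -n default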
level=info timestamp=2018-07-26T19:56:33.930064Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:56:34.632472Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type ADDED" level=info timestamp=2018-07-26T19:56:34.633795Z pos=vm.go:657 component=virt-handler namespace=kubevirt-test-default name=testvmiblpq5 kind=Domain uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Domain is in state Paused reason StartingUp" level=info timestamp=2018-07-26T19:56:35.296970Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:56:35.316245Z pos=vm.go:688 component=virt-handler namespace=kubevirt-test-default name=testvmiblpq5 kind=Domain uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Domain is in state Running reason Unknown" level=info timestamp=2018-07-26T19:56:35.317123Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:56:35.318954Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:56:35.319066Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T19:56:35.405224Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:56:35.408706Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:56:35.417229Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Synchronization loop succeeded." Pod name: virt-handler-w85fb Pod phase: Running level=info timestamp=2018-07-26T19:56:39.821532Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:56:39.833133Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmipb77d kind= uid=f5af5278-910d-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:56:39.848771Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmipb77d kind= uid=f5af5278-910d-11e8-858f-525500d15501 msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-26T19:56:43.299902Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:56:45.916405Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type ADDED" level=info timestamp=2018-07-26T19:56:46.845150Z pos=vm.go:657 component=virt-handler namespace=kubevirt-test-default name=testvmi44czt kind=Domain uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Domain is in state Paused reason StartingUp" level=info timestamp=2018-07-26T19:56:47.451947Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:56:48.482373Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T19:56:48.228148Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:56:49.328123Z pos=vm.go:688 component=virt-handler namespace=kubevirt-test-default name=testvmi44czt kind=Domain uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Domain is in state Running reason Unknown" level=info timestamp=2018-07-26T19:56:50.973906Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:56:52.096562Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:56:52.800593Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:56:53.358062Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Synchronization loop succeeded." 
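The virt-handler entries above walk the libvirt domain for testvmi44czt from "Paused reason StartingUp" to "Running"; the virt-launcher pods further down print the same transitions as numeric state:reason pairs ("domain status: 3:11" is Paused/StartingUp, "1:1" is Running). A hedged way to query that state by hand is to ask libvirt inside the launcher pod; the placeholder pod name, the "compute" container and the <namespace>_<vmi> domain naming are assumptions about the launcher layout rather than values from this run.
# Hypothetical spot check of the domain state reported above.
kubectl exec -n kubevirt-test-default <virt-launcher-pod> -c compute -- virsh list --all
kubectl exec -n kubevirt-test-default <virt-launcher-pod> -c compute -- virsh domstate --reason kubevirt-test-default_testvmi44czt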
level=info timestamp=2018-07-26T19:56:53.685493Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Processing vmi update" Pod name: virt-launcher-testvmi44czt-bt6zd Pod phase: Running level=info timestamp=2018-07-26T19:56:44.033443Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received" level=info timestamp=2018-07-26T19:56:44.898092Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:56:44.955878Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 1c691d94-55be-4bed-8f62-bae598b4e3e9" level=info timestamp=2018-07-26T19:56:44.956742Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:56:45.918562Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:56:45.919079Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:56:45.994229Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 1c691d94-55be-4bed-8f62-bae598b4e3e9: 216" level=info timestamp=2018-07-26T19:56:46.032069Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:56:46.050312Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Domain started." level=info timestamp=2018-07-26T19:56:46.051930Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:56:48.932538Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:56:48.933189Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-26T19:56:48.978424Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:56:52.911701Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi44czt kind= uid=f5aabdca-910d-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:56:53.360100Z pos=client.go:145 component=virt-launcher msg="processed event" Pod name: virt-launcher-testvmi5n9s5-w52z7 Pod phase: Running level=info timestamp=2018-07-26T19:56:37.864778Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:56:37.986998Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:56:38.058475Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID e4afe871-571c-4b3c-9d7a-4e27046a321f" level=info timestamp=2018-07-26T19:56:38.061177Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:56:38.295341Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:56:38.332935Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:56:38.341755Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi5n9s5 kind= uid=f5b4a7b5-910d-11e8-858f-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-26T19:56:38.344176Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi5n9s5 kind= uid=f5b4a7b5-910d-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:56:38.427035Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:56:38.427546Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-26T19:56:38.446325Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:56:38.557292Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:56:39.071425Z pos=monitor.go:222 component=virt-launcher msg="Found PID for e4afe871-571c-4b3c-9d7a-4e27046a321f: 188" level=info timestamp=2018-07-26T19:56:39.801527Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi5n9s5 kind= uid=f5b4a7b5-910d-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:56:39.812547Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi5n9s5 kind= uid=f5b4a7b5-910d-11e8-858f-525500d15501 msg="Synced vmi" Pod name: virt-launcher-testvmiblpq5-54bwp Pod phase: Running level=info timestamp=2018-07-26T19:56:34.185354Z pos=manager.go:157 component=virt-launcher namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Domain defined." level=info timestamp=2018-07-26T19:56:34.623317Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:56:34.633231Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:56:34.794381Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 84b47225-6358-453c-8092-91f3d699cae0" level=info timestamp=2018-07-26T19:56:34.795316Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:56:35.224821Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:56:35.270014Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Domain started." 
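Each "Pod name: virt-launcher-..." block in this dump is the captured output of one launcher pod; when debugging interactively the same stream can be followed live. A minimal sketch, reusing a pod name printed above and assuming the usual "compute" container:
# Hypothetical live tail of a launcher pod from this run.
kubectl logs -f -n kubevirt-test-default virt-launcher-testvmi5n9s5-w52z7 -c compute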
level=info timestamp=2018-07-26T19:56:35.270417Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:56:35.276123Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:56:35.299285Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:56:35.299808Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received" level=info timestamp=2018-07-26T19:56:35.315416Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:56:35.318036Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:56:35.415441Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Synced vmi" level=info timestamp=2018-07-26T19:56:35.894781Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 84b47225-6358-453c-8092-91f3d699cae0: 184" Pod name: virt-launcher-testvmipb77d-zt6n7 Pod phase: Running level=info timestamp=2018-07-26T19:56:38.649082Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:56:38.660251Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 68fbb43e-7844-4804-9fbb-0e7d14343894" level=info timestamp=2018-07-26T19:56:38.660425Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:56:38.753717Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:56:38.860051Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:56:38.876570Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1" level=info timestamp=2018-07-26T19:56:38.889565Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmipb77d kind= uid=f5af5278-910d-11e8-858f-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-26T19:56:38.891367Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmipb77d kind= uid=f5af5278-910d-11e8-858f-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T19:56:39.397542Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:56:39.398202Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-26T19:56:39.478521Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T19:56:39.668604Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 68fbb43e-7844-4804-9fbb-0e7d14343894: 198"
level=info timestamp=2018-07-26T19:56:39.794050Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmipb77d kind= uid=f5af5278-910d-11e8-858f-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T19:56:39.840290Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmipb77d kind= uid=f5af5278-910d-11e8-858f-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T19:56:39.849704Z pos=client.go:145 component=virt-launcher msg="processed event"

• Failure in Spec Setup (BeforeEach) [36.118 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be able to reach [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    the Inbound VirtualMachineInstance
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

    Expected error:
        <*kubecli.AsyncSubresourceError | 0xc420b651a0>: {
            err: "Can't connect to websocket (503): service unavailable\n\n",
            StatusCode: 503,
        }
        Can't connect to websocket (503): service unavailable
    not to have occurred

    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:94
------------------------------
level=info timestamp=2018-07-26T19:56:19.455370Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmi44czt-bt6zd"
level=info timestamp=2018-07-26T19:56:41.103698Z pos=utils.go:243 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmi44czt-bt6zd"
level=info timestamp=2018-07-26T19:56:48.191907Z pos=utils.go:243 component=tests msg="VirtualMachineInstance defined."
level=info timestamp=2018-07-26T19:56:52.361021Z pos=utils.go:243 component=tests msg="VirtualMachineInstance started."
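The failure above means the test could not open the serial-console websocket through virt-api's aggregated subresource API and got an HTTP 503 back (the same /apis/subresources.kubevirt.io/v1alpha2 endpoint the virt-api logs show being polled). A hedged first triage is sketched below; the virt-api service name, namespace and pod label are assumptions about this deployment rather than values taken from the run.
# Hypothetical triage of the websocket 503.
kubectl get apiservice v1alpha2.subresources.kubevirt.io
kubectl get endpoints virt-api -n kube-system
kubectl logs -n kube-system -l kubevirt.io=virt-api --tail=50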
2018/07/26 15:57:50 read closing down: EOF 2018/07/26 15:58:01 read closing down: EOF 2018/07/26 15:58:12 read closing down: EOF 2018/07/26 15:58:23 read closing down: EOF 2018/07/26 15:58:23 read closing down: EOF 2018/07/26 15:58:26 read closing down: EOF 2018/07/26 15:58:27 read closing down: EOF • [SLOW TEST:92.860 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48 should be able to reach /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 the Inbound VirtualMachineInstance with pod network connectivity explicitly set /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ 2018/07/26 15:58:27 read closing down: EOF 2018/07/26 16:01:34 read closing down: EOF Pod name: disks-images-provider-nq854 Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pd68g Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-756zm Pod phase: Running level=info timestamp=2018-07-26T20:00:44.417243Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-26T20:00:48.322236Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T20:00:49.552748Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 20:00:50 http: TLS handshake error from 10.129.0.1:38090: EOF level=info timestamp=2018-07-26T20:00:52.878746Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 20:01:00 http: TLS handshake error from 10.129.0.1:38102: EOF level=info timestamp=2018-07-26T20:01:03.061783Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 20:01:10 http: TLS handshake error from 10.129.0.1:38114: EOF level=info timestamp=2018-07-26T20:01:13.291745Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T20:01:14.541076Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T20:01:18.568071Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T20:01:19.855473Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 20:01:20 http: TLS handshake error from 10.129.0.1:38126: EOF level=info timestamp=2018-07-26T20:01:23.497304Z pos=filter.go:46 component=virt-api 
remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 20:01:30 http: TLS handshake error from 10.129.0.1:38138: EOF Pod name: virt-api-7d79764579-st4kr Pod phase: Running 2018/07/26 19:59:31 http: TLS handshake error from 10.129.0.1:34108: EOF 2018/07/26 19:59:41 http: TLS handshake error from 10.129.0.1:34120: EOF 2018/07/26 19:59:51 http: TLS handshake error from 10.129.0.1:34132: EOF 2018/07/26 20:00:01 http: TLS handshake error from 10.129.0.1:34144: EOF 2018/07/26 20:00:11 http: TLS handshake error from 10.129.0.1:34156: EOF level=info timestamp=2018-07-26T20:00:14.340640Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 20:00:21 http: TLS handshake error from 10.129.0.1:34168: EOF 2018/07/26 20:00:31 http: TLS handshake error from 10.129.0.1:34180: EOF 2018/07/26 20:00:41 http: TLS handshake error from 10.129.0.1:34192: EOF 2018/07/26 20:00:51 http: TLS handshake error from 10.129.0.1:34204: EOF 2018/07/26 20:01:01 http: TLS handshake error from 10.129.0.1:34216: EOF 2018/07/26 20:01:11 http: TLS handshake error from 10.129.0.1:34228: EOF level=info timestamp=2018-07-26T20:01:14.327875Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 20:01:21 http: TLS handshake error from 10.129.0.1:34240: EOF 2018/07/26 20:01:31 http: TLS handshake error from 10.129.0.1:34252: EOF Pod name: virt-controller-7d57d96b65-7cbb4 Pod phase: Running level=info timestamp=2018-07-26T19:09:04.096713Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-controller-7d57d96b65-rmsl2 Pod phase: Running level=info timestamp=2018-07-26T19:56:18.552539Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5n9s5 kind= uid=f5b4a7b5-910d-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:56:18.552813Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5n9s5 kind= uid=f5b4a7b5-910d-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:18.641666Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:56:18.641908Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiblpq5 kind= uid=f5c1bd4e-910d-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:18.840833Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi44czt\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi44czt" level=info timestamp=2018-07-26T19:56:18.941159Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmipb77d\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing 
VirtualMachineInstance kubevirt-test-default/testvmipb77d" level=info timestamp=2018-07-26T19:56:57.099173Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:56:57.099513Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:57.101381Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibrzxq kind= uid=0ca7e149-910e-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:56:57.101622Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmibrzxq kind= uid=0ca7e149-910e-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:57.114711Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:56:57.114829Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:57.191329Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T19:56:57.191448Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T19:56:57.773574Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvminvbp7\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvminvbp7" Pod name: virt-handler-vv95q Pod phase: Running level=info timestamp=2018-07-26T19:57:15.408373Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmibrzxq kind= uid=0ca7e149-910e-11e8-858f-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T19:57:16.336765Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmibrzxq kind= uid=0ca7e149-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:57:16.336925Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmibrzxq kind= uid=0ca7e149-910e-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:57:16.598738Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmibrzxq kind= uid=0ca7e149-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." 
level=info timestamp=2018-07-26T19:57:19.176084Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:57:20.278019Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type ADDED" level=info timestamp=2018-07-26T19:57:20.330303Z pos=vm.go:657 component=virt-handler namespace=kubevirt-test-default name=testvminvbp7 kind=Domain uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Domain is in state Paused reason StartingUp" level=info timestamp=2018-07-26T19:57:21.545931Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:57:21.647573Z pos=vm.go:688 component=virt-handler namespace=kubevirt-test-default name=testvminvbp7 kind=Domain uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Domain is in state Running reason Unknown" level=info timestamp=2018-07-26T19:57:21.671589Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:57:21.685608Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:57:21.687177Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T19:57:23.685146Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:57:23.686019Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:57:23.693481Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." Pod name: virt-handler-w85fb Pod phase: Running level=info timestamp=2018-07-26T19:57:18.077174Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T19:57:18.923541Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:57:19.131848Z pos=vm.go:688 component=virt-handler namespace=kubevirt-test-default name=testvmibtglb kind=Domain uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Domain is in state Running reason Unknown" level=error timestamp=2018-07-26T19:57:19.270903Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7cw8w\": the object has been modified; please apply your changes to the latest version and try again" msg="Updating the VirtualMachineInstance status failed." 
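The error above ("the object has been modified; please apply your changes to the latest version and try again") is the standard Kubernetes optimistic-concurrency conflict: virt-handler tried to write the VMI status with a stale resourceVersion, so the API server rejected the update and the key is simply re-enqueued, as the next entry shows. The same conflict can be reproduced by hand; a rough sketch with kubectl, reusing names from the log, not a step this job performs:
# Hypothetical reproduction of the resourceVersion conflict.
kubectl get virtualmachineinstance testvmi7cw8w -n kubevirt-test-default -o yaml > /tmp/stale.yaml
# ...another writer updates the VMI in the meantime...
kubectl replace -f /tmp/stale.yaml        # rejected with the same Conflict message
kubectl get virtualmachineinstance testvmi7cw8w -n kubevirt-test-default -o yaml | kubectl replace -f -   # retrying against the latest version succeeds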
level=info timestamp=2018-07-26T19:57:19.624932Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7cw8w\": the object has been modified; please apply your changes to the latest version and try again" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmi7cw8w" level=info timestamp=2018-07-26T19:57:21.229710Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:57:20.708919Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:57:22.034395Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T19:57:22.032443Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-26T19:57:22.173351Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:57:22.173534Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:57:22.196850Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:57:24.224589Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T19:57:24.224786Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-26T19:57:24.563217Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Synchronization loop succeeded." Pod name: virt-launcher-testvmi7cw8w-v2c2f Pod phase: Running level=info timestamp=2018-07-26T19:57:15.559038Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11" level=info timestamp=2018-07-26T19:57:15.565300Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 85f6daae-1e33-4b43-81b6-b220faf11fcb" level=info timestamp=2018-07-26T19:57:15.565803Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s" level=info timestamp=2018-07-26T19:57:15.629332Z pos=client.go:145 component=virt-launcher msg="processed event" level=info timestamp=2018-07-26T19:57:16.058097Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received" level=info timestamp=2018-07-26T19:57:16.109662Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Domain started." 
level=info timestamp=2018-07-26T19:57:16.110138Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T19:57:16.114767Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T19:57:16.135123Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:16.135331Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-26T19:57:16.160236Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T19:57:16.175344Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:16.575921Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 85f6daae-1e33-4b43-81b6-b220faf11fcb: 184"
level=info timestamp=2018-07-26T19:57:22.088603Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T19:57:22.184436Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi7cw8w kind= uid=0cb34517-910e-11e8-858f-525500d15501 msg="Synced vmi"
Pod name: virt-launcher-testvmibrzxq-vwnq5
Pod phase: Running
level=info timestamp=2018-07-26T19:57:14.273034Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-26T19:57:14.760380Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-26T19:57:14.773942Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 2820e10b-bac4-435d-9ddb-6453aa9e2b77"
level=info timestamp=2018-07-26T19:57:14.774620Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-26T19:57:14.787425Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:15.335309Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-26T19:57:15.365132Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T19:57:15.374492Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:15.374626Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-26T19:57:15.388256Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmibrzxq kind= uid=0ca7e149-910e-11e8-858f-525500d15501 msg="Domain started."
level=info timestamp=2018-07-26T19:57:15.390047Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmibrzxq kind= uid=0ca7e149-910e-11e8-858f-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T19:57:15.393638Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T19:57:15.397534Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:15.817655Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 2820e10b-bac4-435d-9ddb-6453aa9e2b77: 192"
level=info timestamp=2018-07-26T19:57:16.340716Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmibrzxq kind= uid=0ca7e149-910e-11e8-858f-525500d15501 msg="Synced vmi"
Pod name: virt-launcher-testvmibtglb-jb6d5
Pod phase: Running
level=info timestamp=2018-07-26T19:57:15.788417Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-26T19:57:16.667247Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-26T19:57:16.681535Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID ebdbeeda-9261-4c45-bf2f-e24cdc9b9187"
level=info timestamp=2018-07-26T19:57:16.681943Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-26T19:57:16.807815Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:17.059014Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-26T19:57:17.100607Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T19:57:17.112496Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Domain started."
level=info timestamp=2018-07-26T19:57:17.117697Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T19:57:17.688436Z pos=monitor.go:222 component=virt-launcher msg="Found PID for ebdbeeda-9261-4c45-bf2f-e24cdc9b9187: 187"
level=info timestamp=2018-07-26T19:57:19.076698Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:19.077255Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-26T19:57:19.102326Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T19:57:22.100584Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:24.296468Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmibtglb kind= uid=0ba586e3-910e-11e8-858f-525500d15501 msg="Synced vmi"
Pod name: virt-launcher-testvminvbp7-scg8h
Pod phase: Running
level=info timestamp=2018-07-26T19:57:19.455129Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-26T19:57:19.950542Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-26T19:57:20.331587Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:20.347761Z pos=virt-launcher.go:214 component=virt-launcher msg="Detected domain with UUID 7701fae4-93e5-4ef5-874c-13627744f00d"
level=info timestamp=2018-07-26T19:57:20.348006Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-26T19:57:20.488061Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-26T19:57:20.538749Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T19:57:20.539922Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Domain started."
level=info timestamp=2018-07-26T19:57:20.541717Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T19:57:21.359712Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 7701fae4-93e5-4ef5-874c-13627744f00d: 202"
level=info timestamp=2018-07-26T19:57:21.648547Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:21.648732Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-26T19:57:21.662669Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T19:57:21.674440Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T19:57:23.691325Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvminvbp7 kind= uid=0cbee68b-910e-11e8-858f-525500d15501 msg="Synced vmi"
2018/07/26 16:01:34 read closing down: EOF
• Failure [187.173 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
should be able to reach
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
the Inbound VirtualMachineInstance with custom MAC address [It]
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
Expected error:
: 180000000000
expect: timer expired after 180 seconds
not to have occurred
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:224
------------------------------
STEP: checking br1 MTU inside the pod
level=info timestamp=2018-07-26T19:58:27.977563Z pos=vmi_networking_test.go:185 component=tests msg="4: br1: mtu 1450 qdisc noqueue state UP group default \n link/ether 0a:58:0a:ab:90:91 brd ff:ff:ff:ff:ff:ff\n inet 169.254.75.86/32 brd 169.254.75.86 scope global br1\n valid_lft forever preferred_lft forever\n inet6 fe80::858:aff:feab:9091/64 scope link \n valid_lft forever preferred_lft forever\n"
STEP: checking eth0 MTU inside the VirtualMachineInstance
level=info timestamp=2018-07-26T19:58:28.482775Z pos=vmi_networking_test.go:205 component=tests msg="[{1 \r\n$ [$ ]} {3 ip address show eth0\r\n2: eth0: mtu 1450 qdisc pfifo_fast qlen 1000\r\n link/ether 0a:58:0a:80:00:2d brd ff:ff:ff:ff:ff:ff [2: eth0: mtu 1450 qdisc pfifo_fast qlen 1000\r\n]} {5 \r\n inet 10.128.0.45/23 brd 10.128. [0]}]"
STEP: checking the VirtualMachineInstance can send MTU sized frames to another VirtualMachineInstance
level=info timestamp=2018-07-26T20:01:34.038905Z pos=utils.go:1200 component=tests namespace=kubevirt-test-default name=testvmibrzxq kind=VirtualMachineInstance uid=0ca7e149-910e-11e8-858f-525500d15501 msg="[{1 \r\n\r\n$ [$ ]} {3 ping 10.128.0.46 -c 1 -w 5 -s 1422\r\nPING 10.128.0.46 (10.128.0.46): 1422 data bytes\r\n\r\n--- 10.128.0.46 ping statistics ---\r\n5 packets transmitted, 0 packets received, 100% packet loss\r\n$ [$ ]} {5 echo $?\r\n1\r\n$ []}]"
2018/07/26 16:01:37 read closing down: EOF
•2018/07/26 16:01:37 read closing down: EOF
2018/07/26 16:01:37 read closing down: EOF
------------------------------
• [SLOW TEST:5.385 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
should be reachable via the propagated IP from a Pod
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
on the same node from Pod
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:6.185 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
should be reachable via the propagated IP from a Pod
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
on a different node from Pod
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
•••••2018/07/26 16:02:43 read closing down: EOF
2018/07/26 16:02:44 read closing down: EOF
------------------------------
• [SLOW TEST:34.521 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
VirtualMachineInstance with custom interface model
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:379
should expose the right device type to the guest
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:380
------------------------------
2018/07/26 16:02:45 read closing down: EOF
•2018/07/26 16:02:46 read closing down: EOF
2018/07/26 16:03:20 read closing down: EOF
2018/07/26 16:03:21 read closing down: EOF
------------------------------
• [SLOW TEST:34.992 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
VirtualMachineInstance with custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:413
should configure custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:414
------------------------------
2018/07/26 16:04:00 read closing down: EOF
• [SLOW TEST:39.924 seconds]
2018/07/26 16:04:00 read closing down: EOF
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
VirtualMachineInstance with custom MAC address in non-conventional format
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:425
should configure custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:426
------------------------------
2018/07/26 16:04:35 read closing down: EOF
2018/07/26 16:04:36 read closing down: EOF
• [SLOW TEST:35.458 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
VirtualMachineInstance with custom MAC address and slirp interface
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:438
should configure custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:439
------------------------------
2018/07/26 16:05:18 read closing down: EOF
• [SLOW TEST:42.471 seconds]
2018/07/26 16:05:18 read closing down: EOF
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
VirtualMachineInstance with disabled automatic attachment of interfaces
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:451
should not configure any external interfaces
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:452
------------------------------
• [SLOW TEST:23.331 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
VMI definition
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
with SM BIOS hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
should successfully start with hook sidecar annotation
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:60
------------------------------
• [SLOW TEST:18.599 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
VMI definition
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
with SM BIOS hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
should call Collect and OnDefineDomain on the hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:67
------------------------------
• [SLOW TEST:21.902 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
VMI definition
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
with SM BIOS hook sidecar
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
should update domain XML with SM BIOS properties
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:83
------------------------------
• [SLOW TEST:5.559 seconds]
Subresource Api
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37
Rbac Authorization
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:48
with correct permissions
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:51
should be allowed to access subresource endpoint
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:52
------------------------------
•
------------------------------
• [SLOW TEST:5.139 seconds]
Subresource Api
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37
Rbac Authorization For Version Command
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:63
with authenticated user
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:66
should be allowed to access subresource version endpoint
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:67
------------------------------
• [SLOW TEST:5.415 seconds]
Subresource Api
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37
Rbac Authorization For Version Command
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:63
Without permissions
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:71
should be able to access subresource version endpoint
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:72
------------------------------
2018/07/26 16:07:23 read closing down: EOF
• [SLOW TEST:40.106 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
with a serial console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
with a cirros image
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:67
should return that we are running cirros
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:68
------------------------------
2018/07/26 16:08:07 read closing down: EOF
• [SLOW TEST:43.926 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
with a serial console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
with a fedora image
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:77
should return that we are running fedora
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:78
------------------------------
2018/07/26 16:08:39 read closing down: EOF
2018/07/26 16:08:40 read closing down: EOF
2018/07/26 16:08:41 read closing down: EOF
2018/07/26 16:08:42 read closing down: EOF
2018/07/26 16:08:42 read closing down: EOF
• [SLOW TEST:35.526 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
with a serial console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
should be able to reconnect to console multiple times
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:87
------------------------------
• [SLOW TEST:17.839 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
with a serial console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
should wait until the virtual machine is in running state and return a stream interface
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:103
------------------------------
• [SLOW TEST:30.478 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
with a serial console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
should fail waiting for the virtual machine instance to be running
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:111
------------------------------
• [SLOW TEST:30.428 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
A new VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
with a serial console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
should fail waiting for the expecter
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:134
------------------------------
• [SLOW TEST:38.422 seconds]
LeaderElection
/root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:43
Start a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:53
when the controller pod is not running
/root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:54
should success
/root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:55
------------------------------
•
------------------------------
• [SLOW TEST:17.668 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
should start it
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:76
------------------------------
• [SLOW TEST:18.607 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
should attach virt-launcher to it
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:82
------------------------------
••••2018/07/26 16:11:50 read closing down: EOF
------------------------------
• [SLOW TEST:32.893 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with boot order
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170
should be able to boot from selected disk
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
Alpine as first boot
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
2018/07/26 16:12:17 read closing down: EOF
• [SLOW TEST:26.888 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with boot order
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170
should be able to boot from selected disk
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
Cirros as first boot
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:16.087 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with user-data
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201
without k8s secret
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202
should retry starting the VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:203
------------------------------
• [SLOW TEST:18.778 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with user-data
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201
without k8s secret
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202
should log warning and proceed once the secret is there
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:233
------------------------------
• [SLOW TEST:42.160 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
when virt-launcher crashes
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:281
should be stopped and have Failed phase
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:282
------------------------------
• [SLOW TEST:33.235 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
when virt-handler crashes
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:304
should recover and continue management
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:305
------------------------------
• [SLOW TEST:44.892 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
when virt-handler is responsive
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:335
should indicate that a node is ready for vmis
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:336
------------------------------
• [SLOW TEST:103.023 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
when virt-handler is not responsive
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:366
the node controller should react
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:405
------------------------------
• [SLOW TEST:19.913 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with node tainted
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:458
the vmi with tolerations should be scheduled
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:480
------------------------------
•
------------------------------
S [SKIPPING] [0.732 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with non default namespace
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:530
should log libvirt start and stop lifecycle events of the domain
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
kubevirt-test-default [It]
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
Skip log query tests for JENKINS ci test environment
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:535
------------------------------
S [SKIPPING] [0.168 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
with non default namespace
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:530
should log libvirt start and stop lifecycle events of the domain
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
kubevirt-test-alternative [It]
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
Skip log query tests for JENKINS ci test environment
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:535
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.177 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
VirtualMachineInstance Emulation Mode
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:591
should enable emulation in virt-launcher [BeforeEach]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:603
Software emulation is not enabled on this cluster
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:599
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.308 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
VirtualMachineInstance Emulation Mode
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:591
should be reflected in domain XML [BeforeEach]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:640
Software emulation is not enabled on this cluster
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:599
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.321 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Creating a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
VirtualMachineInstance Emulation Mode
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:591
should request a TUN device but not KVM [BeforeEach]
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:684
Software emulation is not enabled on this cluster
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:599
------------------------------
••••
------------------------------
• [SLOW TEST:18.614 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Delete a VirtualMachineInstance's Pod
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:836
should result in the VirtualMachineInstance moving to a finalized state
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:837
------------------------------
• [SLOW TEST:43.663 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Delete a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:868
with an active pod.
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:869
should result in pod being terminated
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:870
------------------------------
• [SLOW TEST:25.933 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Delete a VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:868
with grace period greater than 0
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:893
should run graceful shutdown
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:894
------------------------------
• [SLOW TEST:33.393 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Killed VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:945
should be in Failed phase
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:946
------------------------------
• [SLOW TEST:27.112 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
Killed VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:945
should be left alone by virt-handler
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:973
------------------------------
Waiting for namespace kubevirt-test-default to be removed, this can take a while ...
Waiting for namespace kubevirt-test-alternative to be removed, this can take a while ...
Summarizing 3 Failures:
[Fail] Expose Expose service on a VMI replica set [BeforeEach] Expose ClusterIP service Should create a ClusterIP service on VMRS and connect to it
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:36
[Fail] Networking [BeforeEach] should be able to reach the Inbound VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:94
[Fail] Networking should be able to reach [It] the Inbound VirtualMachineInstance with custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:224
Ran 136 of 148 Specs in 4205.571 seconds
FAIL! -- 133 Passed | 3 Failed | 0 Pending | 12 Skipped
--- FAIL: TestTests (4205.58s)
FAIL
make: *** [functest] Error 1
+ make cluster-down
./cluster/down.sh