+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ [[ openshift-3.10-release =~ openshift-.* ]]
+ [[ openshift-3.10-release =~ .*-crio-.* ]]
+ export KUBEVIRT_PROVIDER=os-3.10.0
+ KUBEVIRT_PROVIDER=os-3.10.0
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT SIGINT SIGTERM SIGSTOP
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading ... [progress dots trimmed]
Downloading ... [progress dots trimmed]
2018/07/26 07:06:56 Waiting for host: 192.168.66.102:22
2018/07/26 07:06:59 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 07:07:07 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 07:07:15 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 07:07:23 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 07:07:28 Connected to tcp://192.168.66.102:22
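The "Waiting for host" retry loop above comes from the provider bring-up tooling; a minimal shell equivalent of the dial-retry behaviour (hypothetical helper, not the actual implementation) would be:

    # Retry a TCP dial until the host's sshd is reachable (sketch).
    wait_for_host() {
        local host=$1 port=$2
        until timeout 1 bash -c "echo > /dev/tcp/${host}/${port}" 2>/dev/null; do
            echo "$(date '+%Y/%m/%d %H:%M:%S') Problem with dial: ${host}:${port} unreachable. Sleeping 5s"
            sleep 5
        done
        echo "$(date '+%Y/%m/%d %H:%M:%S') Connected to tcp://${host}:${port}"
    }
    wait_for_host 192.168.66.102 22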
+ systemctl stop origin-node.service
+ rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/
++ docker ps -q
+ containers=
+ '[' -n '' ']'
++ docker ps -q -a
+ containers='2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3'
+ '[' -n '2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3' ']'
+ docker rm -f 2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3
2cfbef31c987
e183c40c07dc
861f604efed4
12902ad26342
028539b1f68b
bd6f07c1906c
d1f95a33a226
c43f96b6da26
e007e5cfd226
b42e2bceca6e
00531aec6f9a
e4ad39ba6cef
504c3df6bbf4
eb1ec0b445ce
b8955b91e8e5
f739ed8f3e59
07668d85ab3a
a6045d125d7b
2ce17110e009
b45f64ab28ef
3a15945be9e1
2a0af99ae1d1
0ece927846d7
0202d5f5dfae
8ce743769d8f
2efb36567bd8
96b65c0493c5
e9ce89fa30e3
2018/07/26 07:07:33 Waiting for host: 192.168.66.101:22
2018/07/26 07:07:36 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 07:07:44 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 07:07:52 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 07:08:00 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/26 07:08:05 Connected to tcp://192.168.66.101:22
+ inventory_file=/root/inventory
+ openshift_ansible=/root/openshift-ansible
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=0.760 ms

--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 0.760/0.760/0.760/0.000 ms
Found node02. Adding it to the inventory.
+ '[' 0 -ne 0 ']'
+ nodes_found=true
+ set -e
+ echo '192.168.66.102 node02'
+ echo 'Found node02. Adding it to the inventory.'
+ echo 'node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102'
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable

--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms

+ '[' 1 -ne 0 ']'
+ break
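The node-discovery trace above condenses to the following loop (reconstructed from the `+` xtrace lines; the redirection targets are not visible in the trace and are assumed):

    # Probe node02..node100 by formatted IP; add reachable nodes to the
    # inventory and stop at the first unreachable address.
    echo '[new_nodes]' >> /root/inventory
    sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
    nodes_found=false
    for i in $(seq 2 100); do
        node=$(printf node%02d "$i")
        node_ip=$(printf 192.168.66.1%02d "$i")
        if ping "$node_ip" -c 1; then
            echo "Found ${node}. Adding it to the inventory."
            echo "${node_ip} ${node}" >> /etc/hosts   # assumed target
            echo "${node} openshift_node_group_name=\"node-config-compute\" openshift_schedulable=true openshift_ip=${node_ip}" >> /root/inventory
            nodes_found=true
        else
            break   # first unreachable IP ends the scan
        fi
    done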
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml

PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Evaluate oo_nodes_to_bootstrap] ******************************************
ok: [localhost] => (item=node02)
TASK [Add masters to oo_nodes_to_bootstrap] ************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)

PLAY [Ensure there are new_nodes] **********************************************
TASK [fail] ********************************************************************
skipping: [localhost]
TASK [fail] ********************************************************************
skipping: [localhost]

PLAY [Initialization Checkpoint Start] *****************************************
TASK [Set install initialization 'In Progress'] ********************************
ok: [node01]
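For reference, the inventory these host-group evaluations read from is shaped roughly as follows (a hypothetical minimal reconstruction; only the node02 entry is actually echoed in the trace above):

    [OSEv3:children]
    new_nodes
    masters
    nodes
    etcd

    [masters]
    node01

    [etcd]
    node01

    [nodes]
    node01

    [new_nodes]
    node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102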
PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Evaluate oo_nodes_to_bootstrap] ******************************************
ok: [localhost] => (item=node02)
TASK [Add masters to oo_nodes_to_bootstrap] ************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)
[WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config

PLAY [Ensure that all non-node hosts are accessible] ***************************
TASK [Gathering Facts] *********************************************************
ok: [node01]

PLAY [Initialize basic host facts] *********************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]
TASK [openshift_sanitize_inventory : include_tasks] ****************************
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02
TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] ***
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : debug] ************************************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : set_stats] ********************************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] ***
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02
TASK [openshift_sanitize_inventory : conditional_set_fact] *********************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : set_fact] *********************************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : conditional_set_fact] *********************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : Standardize on latest variable names] *****
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : Normalize openshift_release] **************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : include_tasks] ****************************
included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02
TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : set_fact] *********************************
skipping: [node01] => (item=openshift_hosted_etcd_storage_kind)
skipping: [node02] => (item=openshift_hosted_etcd_storage_kind)
TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : ensure provider configuration variables are defined] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure removed web console extension variables are not set] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that web console port matches API server port] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : At least one master is schedulable] *******
skipping: [node01]
skipping: [node02]
TASK [Detecting Operating System from ostree_booted] ***************************
ok: [node02]
ok: [node01]
TASK [set openshift_deployment_type if unset] **********************************
skipping: [node01]
skipping: [node02]
TASK [check for node already bootstrapped] *************************************
ok: [node02]
ok: [node01]
TASK [initialize_facts set fact openshift_is_bootstrapped] *********************
ok: [node01]
ok: [node02]
TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] ***
ok: [node01]
ok: [node02]
TASK [Determine Atomic Host Docker Version] ************************************
skipping: [node01]
skipping: [node02]
TASK [assert atomic host docker version is 1.12 or later] **********************
skipping: [node01]
skipping: [node02]

PLAY [Retrieve existing master configs and validate] ***************************
TASK [openshift_control_plane : stat] ******************************************
ok: [node01]
TASK [openshift_control_plane : slurp] *****************************************
ok: [node01]
TASK [openshift_control_plane : set_fact] **************************************
ok: [node01]
TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] ***
ok: [node01]
TASK [openshift_control_plane : set_fact] **************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
skipping: [node01]

PLAY [Initialize special first-master variables] *******************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]

PLAY [Disable web console if required] *****************************************
TASK [set_fact] ****************************************************************
skipping: [node01]

PLAY [Setup yum repositories for all hosts] ************************************
TASK [rhel_subscribe : fail] ***************************************************
skipping: [node02]
TASK [rhel_subscribe : Install Red Hat Subscription manager] *******************
skipping: [node02]
TASK [rhel_subscribe : Is host already registered?] ****************************
skipping: [node02]
TASK [rhel_subscribe : Register host] ******************************************
skipping: [node02]
TASK [rhel_subscribe : fail] ***************************************************
skipping: [node02]
TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] ***********
skipping: [node02]
TASK [rhel_subscribe : Attach to OpenShift Pool] *******************************
skipping: [node02]
TASK [rhel_subscribe : Satellite preparation] **********************************
skipping: [node02]
TASK [openshift_repos : openshift_repos detect ostree] *************************
ok: [node02]
TASK [openshift_repos : Ensure libselinux-python is installed] *****************
ok: [node02]
TASK [openshift_repos : Remove openshift_additional.repo file] *****************
ok: [node02]
TASK [openshift_repos : Create any additional repos that are defined] **********
TASK [openshift_repos : include_tasks] *****************************************
skipping: [node02]
TASK [openshift_repos : include_tasks] *****************************************
included: /root/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02
TASK [openshift_repos : Configure origin gpg keys] *****************************
ok: [node02]
TASK [openshift_repos : Configure correct origin release repository] ***********
ok: [node02] => (item=/root/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2)
TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] ***
changed: [node02] => {
    "msg": "First run of openshift_repos"
}
TASK [openshift_repos : Record that openshift_repos already ran] ***************
ok: [node02]
RUNNING HANDLER [openshift_repos : refresh cache] ******************************
changed: [node02]

PLAY [Install packages necessary for installer] ********************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [Determine if chrony is installed] ****************************************
[WARNING]: Consider using the yum, dnf or zypper module rather than running rpm. If you need to use command because yum, dnf or zypper is insufficient you can add warn=False to this command task or set command_warnings=False in ansible.cfg to get rid of this message.
changed: [node02]
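The [WARNING] above is advisory; a hypothetical rewrite of the chrony probe that follows its advice (task shape assumed, not taken from the actual role) would look like:

    # Ansible task sketch: keep the rpm query but opt out of the warning.
    - name: Determine if chrony is installed
      command: rpm -q chrony
      args:
        warn: false
      register: chrony_installed
      changed_when: false
      failed_when: chrony_installed.rc not in [0, 1]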
TASK [Install ntp package] *****************************************************
skipping: [node02]
TASK [Start and enable ntpd/chronyd] *******************************************
changed: [node02]
TASK [Ensure openshift-ansible installer package deps are installed] ***********
ok: [node02] => (item=iproute)
ok: [node02] => (item=dbus-python)
ok: [node02] => (item=PyYAML)
ok: [node02] => (item=python-ipaddress)
ok: [node02] => (item=libsemanage-python)
ok: [node02] => (item=yum-utils)
ok: [node02] => (item=python-docker)

PLAY [Initialize cluster facts] ************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]
TASK [get openshift_current_version] *******************************************
ok: [node02]
ok: [node01]
TASK [set_fact openshift_portal_net if present on masters] *********************
ok: [node01]
ok: [node02]
TASK [Gather Cluster facts] ****************************************************
changed: [node02]
changed: [node01]
TASK [Set fact of no_proxy_internal_hostnames] *********************************
skipping: [node01]
skipping: [node02]
TASK [Initialize openshift.node.sdn_mtu] ***************************************
changed: [node02]
ok: [node01]

PLAY [Initialize etcd host variables] ******************************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]

PLAY [Determine openshift_version to configure on first master] ****************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [include_role : openshift_version] ****************************************
TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] ***
ok: [node01]
TASK [openshift_version : Set openshift_version to openshift_release if undefined] ***
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "msg": "openshift_pkg_version was not defined. Falling back to -3.10.0"
}
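The fallback above pins openshift_pkg_version to the release; downstream this becomes the yum glob and image tag printed a few tasks later. In plain commands (package names taken from the later node install task; the image name is an assumption, not shown in the log):

    # RPM pin derived from openshift_pkg_version="-3.10.0*"
    yum install -y 'origin-node-3.10.0*' 'origin-clients-3.10.0*'
    # Image pin derived from openshift_image_tag="v3.10.0-rc.0"
    docker pull docker.io/openshift/origin-node:v3.10.0-rc.0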
TASK [openshift_version : set_fact] ********************************************
ok: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : assert openshift_release in openshift_image_tag] *****
ok: [node01] => {
    "changed": false,
    "msg": "All assertions passed"
}
TASK [openshift_version : assert openshift_release in openshift_pkg_version] ***
ok: [node01] => {
    "changed": false,
    "msg": "All assertions passed"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_release": "3.10"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_image_tag": "v3.10.0-rc.0"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_pkg_version": "-3.10.0*"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_version": "3.10.0"
}
TASK [set openshift_version booleans (first master)] ***************************
ok: [node01]

PLAY [Set openshift_version for etcd, node, and master hosts] ******************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [set_fact] ****************************************************************
ok: [node02]
TASK [set openshift_version booleans (masters and nodes)] **********************
ok: [node02]

PLAY [Verify Requirements] *****************************************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [Run variable sanity checks] **********************************************
ok: [node01]
TASK [Validate openshift_node_groups and openshift_node_group_name] ************
ok: [node01]

PLAY [Initialization Checkpoint End] *******************************************
TASK [Set install initialization 'Complete'] ***********************************
ok: [node01]

PLAY [Validate node hostnames] *************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [Query DNS for IP address of node02] **************************************
ok: [node02]
TASK [Validate openshift_hostname when defined] ********************************
skipping: [node02]
TASK [Validate openshift_ip exists on node when defined] ***********************
skipping: [node02]

PLAY [Configure os_firewall] ***************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [os_firewall : Detecting Atomic Host Operating System] ********************
ok: [node02]
TASK [os_firewall : Set fact r_os_firewall_is_atomic] **************************
ok: [node02]
TASK [os_firewall : Fail - Firewalld is not supported on Atomic Host] **********
skipping: [node02]
TASK [os_firewall : Install firewalld packages] ********************************
skipping: [node02]
TASK [os_firewall : Ensure iptables services are not enabled] ******************
skipping: [node02] => (item=iptables)
skipping: [node02] => (item=ip6tables)
TASK [os_firewall : Wait 10 seconds after disabling iptables] ******************
skipping: [node02]
TASK [os_firewall : Start and enable firewalld service] ************************
skipping: [node02]
TASK [os_firewall : need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail] ***
skipping: [node02]
TASK [os_firewall : Restart polkitd] *******************************************
skipping: [node02]
TASK [os_firewall : Wait for polkit action to have been created] ***************
skipping: [node02]
TASK [os_firewall : Ensure firewalld service is not enabled] *******************
ok: [node02]
TASK [os_firewall : Wait 10 seconds after disabling firewalld] *****************
skipping: [node02]
TASK [os_firewall : Install iptables packages] *********************************
ok: [node02] => (item=iptables)
ok: [node02] => (item=iptables-services)
TASK [os_firewall : Start and enable iptables service] *************************
ok: [node02 -> node02] => (item=node02)
TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] ***
skipping: [node02]
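On this host the os_firewall role takes the iptables branch (all firewalld tasks are skipped), so the effective operations reduce to (sketch):

    # What the os_firewall role effectively ran here (firewalld path skipped):
    yum install -y iptables iptables-services
    systemctl enable --now iptables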
PLAY [oo_nodes_to_config] ******************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [container_runtime : Setup the docker-storage for overlay] ****************
skipping: [node02]
TASK [container_runtime : Create file system on extra volume device] ***********
TASK [container_runtime : Create mount entry for extra volume] *****************

PLAY [oo_nodes_to_config] ******************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [openshift_excluder : Install docker excluder - yum] **********************
ok: [node02]
TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]
TASK [openshift_excluder : set_fact] *******************************************
ok: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : Enable openshift excluder] **************************
skipping: [node02]
TASK [container_runtime : Getting current systemd-udevd exec command] **********
skipping: [node02]
TASK [container_runtime : Assure systemd-udevd.service.d directory exists] *****
skipping: [node02]
TASK [container_runtime : Create systemd-udevd override file] ******************
skipping: [node02]
TASK [container_runtime : Add enterprise registry, if necessary] ***************
skipping: [node02]
TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ******************
skipping: [node02]
TASK [container_runtime : Add https_proxy to /etc/atomic.conf] *****************
skipping: [node02]
TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ********************
skipping: [node02]
TASK [container_runtime : Get current installed Docker version] ****************
ok: [node02]
TASK [container_runtime : Error out if Docker pre-installed but too old] *******
skipping: [node02]
TASK [container_runtime : Error out if requested Docker is too old] ************
skipping: [node02]
TASK [container_runtime : Install Docker] **************************************
skipping: [node02]
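Both "Error out" gates above are skipped because the preinstalled Docker passes the version check. A shell sketch of that kind of gate (the minimum version is an assumption; the role's exact threshold is not in the log):

    # Sketch: refuse a preinstalled Docker older than a minimum version.
    min=1.13
    cur=$(rpm -q --qf '%{VERSION}' docker 2>/dev/null || echo "")
    if [ -n "$cur" ] && [ "$(printf '%s\n%s\n' "$min" "$cur" | sort -V | head -n1)" != "$min" ]; then
        echo "installed Docker $cur is older than $min" >&2
        exit 1
    fi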
TASK [container_runtime : Ensure docker.service.d directory exists] ************
ok: [node02]
TASK [container_runtime : Configure Docker service unit file] ******************
ok: [node02]
TASK [container_runtime : stat] ************************************************
ok: [node02]
TASK [container_runtime : Set registry params] *********************************
skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []})
skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []})
skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []})
TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] ***
skipping: [node02]
TASK [container_runtime : Set Proxy Settings] **********************************
skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''})
skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''})
skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''})
TASK [container_runtime : Set various Docker options] **************************
ok: [node02]
TASK [container_runtime : stat] ************************************************
ok: [node02]
TASK [container_runtime : Configure Docker Network OPTIONS] ********************
ok: [node02]
TASK [container_runtime : Detect if docker is already started] *****************
ok: [node02]
TASK [container_runtime : Start the Docker service] ****************************
ok: [node02]
TASK [container_runtime : set_fact] ********************************************
ok: [node02]
TASK [container_runtime : Check for docker_storage_path/overlay2] **************
ok: [node02]
TASK [container_runtime : Fixup SELinux permissions for docker] ****************
changed: [node02]
TASK [container_runtime : Ensure /var/lib/containers exists] *******************
ok: [node02]
TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ******
ok: [node02]
TASK [container_runtime : Check for credentials file for registry auth] ********
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth] *****
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] ***
skipping: [node02]
TASK [container_runtime : stat the docker data dir] ****************************
ok: [node02]
TASK [container_runtime : stop the current running docker] *********************
skipping: [node02]
TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] ***
skipping: [node02]
TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] ***
skipping: [node02]
TASK [container_runtime : restorecon the /var/lib/containers/docker] ***********
skipping: [node02]
TASK [container_runtime : Remove the old docker location] **********************
skipping: [node02]
TASK [container_runtime : Setup the link] **************************************
skipping: [node02]
TASK [container_runtime : start docker] ****************************************
skipping: [node02]
TASK [container_runtime : Fail if Atomic Host since this is an rpm request] ****
skipping: [node02]
TASK [container_runtime : Getting current systemd-udevd exec command] **********
skipping: [node02]
TASK [container_runtime : Assure systemd-udevd.service.d directory exists] *****
skipping: [node02]
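A rough manual equivalent of the docker configuration and bring-up block above (the rendered unit-file contents and the exact SELinux fix are not shown in the log; restorecon is an assumption):

    mkdir -p /etc/systemd/system/docker.service.d
    systemctl daemon-reload
    systemctl is-active docker >/dev/null || systemctl start docker
    mkdir -p /var/lib/containers
    restorecon -R /var/lib/containers   # "Fix SELinux Permissions" tasks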
TASK [container_runtime : Create systemd-udevd override file] ******************
skipping: [node02]
TASK [container_runtime : Add enterprise registry, if necessary] ***************
skipping: [node02]
TASK [container_runtime : Check that overlay is in the kernel] *****************
skipping: [node02]
TASK [container_runtime : Add overlay to modprobe.d] ***************************
skipping: [node02]
TASK [container_runtime : Manually modprobe overlay into the kernel] ***********
skipping: [node02]
TASK [container_runtime : Enable and start systemd-modules-load] ***************
skipping: [node02]
TASK [container_runtime : Install cri-o] ***************************************
skipping: [node02]
TASK [container_runtime : Remove CRI-O default configuration files] ************
skipping: [node02] => (item=/etc/cni/net.d/200-loopback.conf)
skipping: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf)
TASK [container_runtime : Create the CRI-O configuration] **********************
skipping: [node02]
TASK [container_runtime : Ensure CNI configuration directory exists] ***********
skipping: [node02]
TASK [container_runtime : Add iptables allow rules] ****************************
skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'})
TASK [container_runtime : Remove iptables rules] *******************************
TASK [container_runtime : Add firewalld allow rules] ***************************
skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'})
TASK [container_runtime : Remove firewalld allow rules] ************************
TASK [container_runtime : Configure the CNI network] ***************************
skipping: [node02]
TASK [container_runtime : Create /etc/sysconfig/crio-network] ******************
skipping: [node02]
TASK [container_runtime : Start the CRI-O service] *****************************
skipping: [node02]
TASK [container_runtime : Ensure /var/lib/containers exists] *******************
skipping: [node02]
TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ******
skipping: [node02]
TASK [container_runtime : Check for credentials file for registry auth] ********
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth] *****
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] ***
skipping: [node02]
TASK [container_runtime : stat the docker data dir] ****************************
skipping: [node02]
TASK [container_runtime : stop the current running docker] *********************
skipping: [node02]
TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] ***
skipping: [node02]
TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] ***
skipping: [node02]
TASK [container_runtime : restorecon the /var/lib/containers/docker] ***********
skipping: [node02]
TASK [container_runtime : Remove the old docker location] **********************
skipping: [node02]
TASK [container_runtime : Setup the link] **************************************
skipping: [node02]
TASK [container_runtime : start docker] ****************************************
skipping: [node02]

PLAY [Determine openshift_version to configure on first master] ****************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [include_role : openshift_version] ****************************************
TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] ***
skipping: [node01]
TASK [openshift_version : Set openshift_version to openshift_release if undefined] ***
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : assert openshift_release in openshift_image_tag] *****
ok: [node01] => {
    "changed": false,
    "msg": "All assertions passed"
}
TASK [openshift_version : assert openshift_release in openshift_pkg_version] ***
ok: [node01] => {
    "changed": false,
    "msg": "All assertions passed"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_release": "3.10"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_image_tag": "v3.10.0-rc.0"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_pkg_version": "-3.10.0*"
}
TASK [openshift_version : debug] ***********************************************
ok: [node01] => {
    "openshift_version": "3.10.0"
}
TASK [set openshift_version booleans (first master)] ***************************
ok: [node01]

PLAY [Set openshift_version for etcd, node, and master hosts] ******************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [set_fact] ****************************************************************
ok: [node02]
TASK [set openshift_version booleans (masters and nodes)] **********************
ok: [node02]

PLAY [Node Preparation Checkpoint Start] ***************************************
TASK [Set Node preparation 'In Progress'] **************************************
ok: [node01]

PLAY [Only target nodes that have not yet been bootstrapped] *******************
TASK [Gathering Facts] *********************************************************
ok: [localhost]
TASK [add_host] ****************************************************************
skipping: [localhost] => (item=node02)
ok: [localhost] => (item=node01)

PLAY [Disable excluders] *******************************************************
TASK [openshift_excluder : Detecting Atomic Host Operating System] *************
ok: [node02]
TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] ***
ok: [node02] => {
    "r_openshift_excluder_enable_docker_excluder": true
}
TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] ***
ok: [node02] => {
    "r_openshift_excluder_enable_openshift_excluder": true
}
TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] ***
skipping: [node02]
TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] ***
skipping: [node02]
TASK [openshift_excluder : Include main action task file] **********************
included: /root/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02
TASK [openshift_excluder : Get available excluder version] *********************
skipping: [node02]
TASK [openshift_excluder : Fail when excluder package is not found] ************
skipping: [node02]
TASK [openshift_excluder : Set fact excluder_version] **************************
skipping: [node02]
TASK [openshift_excluder : origin-docker-excluder version detected] ************
skipping: [node02]
TASK [openshift_excluder : Printing upgrade target version] ********************
skipping: [node02]
TASK [openshift_excluder : Check the available origin-docker-excluder version is at most of the upgrade target version] ***
skipping: [node02]
TASK [openshift_excluder : Get available excluder version] *********************
skipping: [node02]
TASK [openshift_excluder : Fail when excluder package is not found] ************
skipping: [node02]
TASK [openshift_excluder : Set fact excluder_version] **************************
skipping: [node02]
TASK [openshift_excluder : origin-excluder version detected] *******************
skipping: [node02]
TASK [openshift_excluder : Printing upgrade target version] ********************
skipping: [node02]
TASK [openshift_excluder : Check the available origin-excluder version is at most of the upgrade target version] ***
skipping: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : disable docker excluder] ****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : disable openshift excluder] *************************
changed: [node02]
TASK [openshift_excluder : Install docker excluder - yum] **********************
skipping: [node02]
TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]
TASK [openshift_excluder : set_fact] *******************************************
skipping: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : Enable openshift excluder] **************************
changed: [node02]
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : disable docker excluder] ****************************
skipping: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : disable openshift excluder] *************************
changed: [node02]

PLAY [Configure nodes] *********************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [openshift_cloud_provider : Set cloud provider facts] *********************
skipping: [node02]
TASK [openshift_cloud_provider : Create cloudprovider config dir] **************
skipping: [node02]
TASK [openshift_cloud_provider : include the defined cloud provider files] *****
skipping: [node02]
TASK [openshift_node : fail] ***************************************************
skipping: [node02]
TASK [openshift_node : Check for NetworkManager service] ***********************
ok: [node02]
TASK [openshift_node : Set fact using_network_manager] *************************
ok: [node02]
TASK [openshift_node : Install dnsmasq] ****************************************
ok: [node02]
TASK [openshift_node : ensure origin/node directory exists] ********************
changed: [node02] => (item=/etc/origin)
changed: [node02] => (item=/etc/origin/node)
TASK [openshift_node : Install NetworkManager during node_bootstrap provisioning] ***
skipping: [node02]
TASK [openshift_node : Install network manager dispatch script] ****************
skipping: [node02]
TASK [openshift_node : Install dnsmasq configuration] **************************
ok: [node02]
TASK [openshift_node : Deploy additional dnsmasq.conf] *************************
skipping: [node02]
TASK [openshift_node : Enable dnsmasq] *****************************************
ok: [node02]
TASK [openshift_node : Install network manager dispatch script] ****************
ok: [node02]
TASK [openshift_node : Add iptables allow rules] *******************************
ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'})
ok: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'})
ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'})
ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'})
ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'})
skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'})
skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'})
skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'})
TASK [openshift_node : Remove iptables rules] **********************************
TASK [openshift_node : Add firewalld allow rules] ******************************
skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'})
skipping: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'})
skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'})
skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'})
skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'})
skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'})
skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'})
skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'})
TASK [openshift_node : Remove firewalld allow rules] ***************************
TASK [openshift_node : Checking for journald.conf] *****************************
ok: [node02]
TASK [openshift_node : Create journald persistence directories] ****************
ok: [node02]
TASK [openshift_node : Update journald setup] **********************************
ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'})
ok: [node02] => (item={u'var': u'Compress', u'val': True})
ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'})
ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'})
ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000})
ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'})
ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'})
ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'})
ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'})
ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'})
ok: [node02] => (item={u'var': u'ForwardToSyslog', u'val': False})
ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False})
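The key/value items applied above map one-to-one onto /etc/systemd/journald.conf; reassembled from the item list (the [Journal] section header and the yes/no rendering of the booleans are assumptions about the template, the values are exactly as logged):

    [Journal]
    Storage=persistent
    Compress=yes
    SyncIntervalSec=1s
    RateLimitInterval=1s
    RateLimitBurst=10000
    SystemMaxUse=8G
    SystemKeepFree=20%
    SystemMaxFileSize=10M
    MaxRetentionSec=1month
    MaxFileSec=1day
    ForwardToSyslog=no
    ForwardToWall=no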
TASK [openshift_node : Restart journald] ***************************************
skipping: [node02]
TASK [openshift_node : Disable swap] *******************************************
ok: [node02]
TASK [openshift_node : Install node, clients, and conntrack packages] **********
ok: [node02] => (item={u'name': u'origin-node-3.10.0*'})
ok: [node02] => (item={u'name': u'origin-clients-3.10.0*'})
ok: [node02] => (item={u'name': u'conntrack-tools'})
TASK [openshift_node : Restart cri-o] ******************************************
skipping: [node02]
TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] ***
changed: [node02]
TASK [openshift_node : sysctl] *************************************************
ok: [node02]
TASK [openshift_node : Check for credentials file for registry auth] ***********
skipping: [node02]
TASK [openshift_node : Create credentials for registry auth] *******************
skipping: [node02]
TASK [openshift_node : Create credentials for registry auth (alternative)] *****
skipping: [node02]
TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] ***
skipping: [node02]
TASK [openshift_node : Check that node image is present] ***********************
changed: [node02]
TASK [openshift_node : Pre-pull node image] ************************************
skipping: [node02]
TASK [openshift_node : Copy node script to the node] ***************************
ok: [node02]
TASK [openshift_node : Install Node service file] ******************************
ok: [node02]
TASK [openshift_node : Ensure old system path is set] **************************
skipping: [node02] => (item=/etc/origin/openvswitch)
skipping: [node02] => (item=/var/lib/kubelet)
skipping: [node02] => (item=/opt/cni/bin)
TASK [openshift_node : Check status of node image pre-pull] ********************
skipping: [node02]
TASK [openshift_node : Copy node container image to ostree storage] ************
skipping: [node02]
TASK [openshift_node : Install or Update node system container] ****************
skipping: [node02]
TASK [openshift_node : Restart network manager to ensure networking configuration is in place] ***
skipping: [node02]
TASK [openshift_node : Configure Node settings] ********************************
ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS='})
ok: [node02] => (item={u'regex': u'^DEBUG_LOGLEVEL=', u'line': u'DEBUG_LOGLEVEL=2'})
ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.10.0-rc.0'})
TASK [openshift_node : Configure Proxy Settings] *******************************
skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='})
skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='})
skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'})
TASK [openshift_node : file] ***************************************************
skipping: [node02]
TASK [openshift_node : Create the Node config] *********************************
changed: [node02]
TASK [openshift_node : Configure Node Environment Variables] *******************
TASK [openshift_node : Ensure the node static pod directory exists] ************
changed: [node02]
TASK [openshift_node : Configure AWS Cloud Provider Settings] ******************
skipping: [node02] => (item=None)
skipping: [node02] => (item=None)
skipping: [node02]
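The "Disable swap" step earlier in this play reported ok (nothing to change here); on a host where swap is active it conventionally amounts to (commands assumed, not shown in the log):

    swapoff -a
    sed -i '/ swap / s/^/#/' /etc/fstab   # keep swap off across reboots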
TASK [openshift_node : Check status of node image pre-pull] ********************
skipping: [node02]
TASK [openshift_node : Install NFS storage plugin dependencies] ****************
ok: [node02]
TASK [openshift_node : Check for existence of nfs sebooleans] ******************
ok: [node02] => (item=virt_use_nfs)
ok: [node02] => (item=virt_sandbox_use_nfs)
TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] ***
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 07:15:49.356181', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.013904', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 07:15:49.342277', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 07:15:50.395120', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.019064', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 07:15:50.376056', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] ***
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 07:15:49.356181', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.013904', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 07:15:49.342277', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 07:15:50.395120', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.019064', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 07:15:50.376056', '_ansible_ignore_errors': None, 'failed': False})
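The verbose registered-result dicts above boil down to a getsebool probe followed by a conditional set; done by hand this is:

    getsebool virt_use_nfs virt_sandbox_use_nfs        # both already "on" here, so the set step is skipped
    setsebool -P virt_use_nfs=on virt_sandbox_use_nfs=on   # only needed when a boolean is off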
TASK [openshift_node : Install GlusterFS storage plugin dependencies] **********
ok: [node02]
TASK [openshift_node : Check for existence of fusefs sebooleans] ***************
ok: [node02] => (item=virt_use_fusefs)
ok: [node02] => (item=virt_sandbox_use_fusefs)
TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] ***
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 07:15:56.035309', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.013999', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 07:15:56.021310', '_ansible_ignore_errors': None, 'failed': False})
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 07:15:57.284588', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.015351', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 07:15:57.269237', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] ***
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 07:15:56.035309', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.013999', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 07:15:56.021310', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-26 07:15:57.284588', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.015351', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-26 07:15:57.269237', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Install Ceph storage plugin dependencies] ***************
ok: [node02]
TASK [openshift_node : Install iSCSI storage plugin dependencies] **************
ok: [node02] => (item=iscsi-initiator-utils)
ok: [node02] => (item=device-mapper-multipath)
TASK [openshift_node : restart services] ***************************************
ok: [node02] => (item=multipathd)
ok: [node02] => (item=rpcbind)
ok: [node02] => (item=iscsid)
TASK [openshift_node : Template multipath configuration] ***********************
changed: [node02]
TASK [openshift_node : Enable and start multipath] *****************************
changed: [node02]
u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': u's0', 'seuser': u'unconfined_u', 'serole': u'object_r', 'ctime': 1531032437.8490183, 'state': u'file', 'gid': 0, 'mode': u'0644', 'mtime': 1531032437.8490183, 'owner': u'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': u'admin_home_t'}) TASK [tuned : Ensure files are populated from templates] *********************** skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Make tuned use 
the recommended tuned profile on restart] ********* changed: [node02] => (item=/etc/tuned/active_profile) changed: [node02] => (item=/etc/tuned/profile_mode) TASK [tuned : Restart tuned service] ******************************************* changed: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] ******* ok: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] *** PLAY [node bootstrap config] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_node : install needed rpm(s)] ********************************** ok: [node02] => (item=origin-node) ok: [node02] => (item=origin-docker-excluder) ok: [node02] => (item=ansible) ok: [node02] => (item=bash-completion) ok: [node02] => (item=docker) ok: [node02] => (item=haproxy) ok: [node02] => (item=dnsmasq) ok: [node02] => (item=ntp) ok: [node02] => (item=logrotate) ok: [node02] => (item=httpd-tools) ok: [node02] => (item=bind-utils) ok: [node02] => (item=firewalld) ok: [node02] => (item=libselinux-python) ok: [node02] => (item=conntrack-tools) ok: [node02] => (item=openssl) ok: [node02] => (item=iproute) ok: [node02] => (item=python-dbus) ok: [node02] => (item=PyYAML) ok: [node02] => (item=yum-utils) ok: [node02] => (item=glusterfs-fuse) ok: [node02] => (item=device-mapper-multipath) ok: [node02] => (item=nfs-utils) ok: [node02] => (item=cockpit-ws) ok: [node02] => (item=cockpit-system) ok: [node02] => (item=cockpit-bridge) ok: [node02] => (item=cockpit-docker) ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=ceph-common) TASK [openshift_node : create the directory for node] ************************** skipping: [node02] TASK [openshift_node : laydown systemd override] ******************************* skipping: [node02] TASK [openshift_node : update the sysconfig to have necessary variables] ******* ok: [node02] => (item={u'regexp': u'^KUBECONFIG=.*', u'line': u'KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig'}) TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : disable origin-node service] **************************** changed: [node02] => (item=origin-node.service) TASK [openshift_node : Check for RPM generated config marker file .config_managed] *** ok: [node02] TASK [openshift_node : create directories for bootstrapping] ******************* ok: [node02] => (item=/root/openshift_bootstrap) changed: [node02] => (item=/var/lib/origin/openshift.local.config) changed: [node02] => (item=/var/lib/origin/openshift.local.config/node) ok: [node02] => (item=/etc/docker/certs.d/docker-registry.default.svc:5000) TASK [openshift_node : laydown the bootstrap.yml file for on boot configuration] *** ok: [node02] TASK [openshift_node : Create a symlink to the node client CA for the docker registry] *** ok: [node02] TASK [openshift_node : Remove RPM generated config files if present] *********** skipping: [node02] => (item=master) skipping: [node02] => (item=.config_managed) TASK [openshift_node : find all files in /etc/origin/node so we can remove them] *** skipping: [node02] TASK [openshift_node : Remove everything except the resolv.conf required for node] *** skipping: [node02] TASK [openshift_node_group : create node config template] ********************** changed: [node02] TASK [openshift_node_group : remove existing node config] 
********************** changed: [node02] TASK [openshift_node_group : Ensure required directories are present] ********** ok: [node02] => (item=/etc/origin/node/pods) changed: [node02] => (item=/etc/origin/node/certificates) TASK [openshift_node_group : Update the sysconfig to group "node-config-compute"] *** changed: [node02] TASK [set_fact] **************************************************************** ok: [node02] PLAY [Re-enable excluder if it was previously enabled] ************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] PLAY [Node Preparation Checkpoint End] ***************************************** TASK [Set Node preparation 'Complete'] ***************************************** ok: [node01] PLAY [Distribute bootstrap and start nodes] ************************************ TASK [openshift_node : Gather node information] ******************************** changed: [node02] ok: [node01] TASK [openshift_node : Copy master bootstrap config locally] ******************* ok: [node02] TASK [openshift_node : Distribute bootstrap kubeconfig if one does not exist] *** ok: [node01] changed: [node02] TASK [openshift_node : Start and enable node for bootstrapping] **************** changed: [node02] changed: [node01] TASK [openshift_node : Get node logs] ****************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : debug] ************************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : fail] *************************************************** skipping: [node02] skipping: [node01] PLAY [Approve any pending CSR requests from inventory nodes] ******************* TASK [Dump all candidate bootstrap hostnames] ********************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Find all hostnames for bootstrapping] ************************************ ok: [node01] TASK [Dump the 
bootstrap hostnames] ********************************************
ok: [node01] => {
    "msg": [
        "node02",
        "node01"
    ]
}

TASK [Approve bootstrap nodes] *************************************************
changed: [node01]

TASK [Get CSRs] ****************************************************************
skipping: [node01]

TASK [Report approval errors] **************************************************
skipping: [node01]

PLAY [Ensure any inventory labels are applied to the nodes] ********************

TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]

TASK [openshift_manage_node : Wait for master API to become available before proceeding] ***
skipping: [node02]

TASK [openshift_manage_node : Wait for Node Registration] **********************
ok: [node01 -> node01]
ok: [node02 -> node01]

TASK [openshift_manage_node : include_tasks] ***********************************
included: /root/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02, node01

TASK [openshift_manage_node : Set node schedulability] *************************
ok: [node02 -> node01]
ok: [node01 -> node01]

TASK [openshift_manage_node : include_tasks] ***********************************
included: /root/openshift-ansible/roles/openshift_manage_node/tasks/set_default_node_role.yml for node02, node01

TASK [openshift_manage_node : Retrieve nodes that are marked with the infra selector or the legacy infra selector] ***
ok: [node02 -> node01]

TASK [openshift_manage_node : Label infra or legacy infra nodes with the new role label] ***

TASK [openshift_manage_node : Retrieve non-infra, non-master nodes that are not yet labeled compute] ***
ok: [node02 -> node01]

TASK [openshift_manage_node : label non-master non-infra nodes compute] ********

TASK [openshift_manage_node : Label all-in-one master as a compute node] *******
skipping: [node02]

PLAY RECAP *********************************************************************
localhost                  : ok=30   changed=0    unreachable=0    failed=0
node01                     : ok=71   changed=3    unreachable=0    failed=0
node02                     : ok=155  changed=33   unreachable=0    failed=0

INSTALLER STATUS ***************************************************************
Initialization   : Complete (0:04:04)
Node Preparation : Complete (0:04:12)

Sending file modes: C0755 110489328 oc
Sending file modes: C0600 5649 admin.kubeconfig
Cluster "node01:8443" set.
Cluster "node01:8443" set.
+ set +e
+ kubectl get nodes --no-headers
+ cluster/kubectl.sh get nodes --no-headers
node01   Ready   compute,infra,master   18d   v1.10.0+b81c8f8
node02   Ready   compute                50s   v1.10.0+b81c8f8
+ kubectl_rc=0
+ '[' 0 -ne 0 ']'
++ kubectl get nodes --no-headers
++ grep NotReady
++ cluster/kubectl.sh get nodes --no-headers
+ '[' -n '' ']'
+ set -e
+ echo 'Nodes are ready:'
Nodes are ready:
+ kubectl get nodes
+ cluster/kubectl.sh get nodes
NAME     STATUS   ROLES                  AGE   VERSION
node01   Ready    compute,infra,master   18d   v1.10.0+b81c8f8
node02   Ready    compute                51s   v1.10.0+b81c8f8
+ make cluster-sync
./cluster/build.sh
Building ...
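# The node-readiness gate a few lines up boils down to a single check: the job
# proceeds only if `kubectl get nodes` reports no NotReady entries. A minimal
# standalone sketch of the same gate (assumes the repo's cluster/kubectl.sh
# wrapper, exactly as this job invokes it):
notready=$(cluster/kubectl.sh get nodes --no-headers | grep NotReady || true)
if [ -n "$notready" ]; then
    echo "Some nodes are not ready yet:" && echo "$notready" && exit 1
fi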
sha256:ceba12cbc33e4e37a707840478a630db561e2427b78c8c9f9cd6d0b73276ab32 go version go1.10 linux/amd64 go version go1.10 linux/amd64 make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh sha256:ceba12cbc33e4e37a707840478a630db561e2427b78c8c9f9cd6d0b73276ab32 go version go1.10 linux/amd64 go version go1.10 linux/amd64 find: '/root/go/src/kubevirt.io/kubevirt/_out/cmd': No such file or directory Compiling tests... compiled tests.test hack/build-docker.sh build Sending build context to Docker daemon 40.37 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Running in 743e2141f1ce ---> 84920e004a40 Removing intermediate container 743e2141f1ce Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Running in 9868806d85f9  ---> b4f3251c6468 Removing intermediate container 9868806d85f9 Step 4/8 : WORKDIR /home/virt-controller ---> 813752072d9d Removing intermediate container b54cafe35dd6 Step 5/8 : USER 1001 ---> Running in efd14bc543f2 ---> 88b3556f36b4 Removing intermediate container efd14bc543f2 Step 6/8 : COPY virt-controller /usr/bin/virt-controller ---> 8c985ca67f4b Removing intermediate container 6c45103417da Step 7/8 : ENTRYPOINT /usr/bin/virt-controller ---> Running in af21b11414ec ---> dbf266a4a2f8 Removing intermediate container af21b11414ec Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-controller" '' ---> Running in dac09088575b ---> 2912528ae004 Removing intermediate container dac09088575b Successfully built 2912528ae004 Sending build context to Docker daemon 42.63 MB Step 1/10 : FROM kubevirt/libvirt:4.2.0 Trying to pull repository docker.io/kubevirt/libvirt ... 4.2.0: Pulling from docker.io/kubevirt/libvirt e71c36a80ba9: Already exists 301132a19a16: Pulling fs layer 6b8a7a35bb20: Pulling fs layer 9abf9a2746ab: Pulling fs layer 55f14d5ffbcf: Pulling fs layer 725d7fed8635: Pulling fs layer 725d7fed8635: Waiting 55f14d5ffbcf: Waiting 6b8a7a35bb20: Verifying Checksum 6b8a7a35bb20: Download complete 9abf9a2746ab: Verifying Checksum 9abf9a2746ab: Download complete 55f14d5ffbcf: Download complete 725d7fed8635: Download complete 301132a19a16: Verifying Checksum 301132a19a16: Download complete 301132a19a16: Pull complete 6b8a7a35bb20: Pull complete 9abf9a2746ab: Pull complete 55f14d5ffbcf: Pull complete 725d7fed8635: Pull complete Digest: sha256:5e0ea4a9a99c61bbc217d0cfa4e432dc32992c38622788ed770dc4897d86e306 Status: Downloaded newer image for docker.io/kubevirt/libvirt:4.2.0 ---> 5f0bfe81a3e0 Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Running in a6e007e71324 ---> 09010a005182 Removing intermediate container a6e007e71324 Step 3/10 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool net-tools sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Running in da370f63dfa8  Fedora 28 - x86_64 - Updates 12 MB/s | 20 MB 00:01 Virtualization packages from Rawhide built for 205 kB/s | 57 kB 00:00 Fedora 28 - x86_64 14 MB/s | 60 MB 00:04 Last metadata expiration check: 0:00:00 ago on Thu Jul 26 07:27:57 2018. Package util-linux-2.32-2.fc28.x86_64 is already installed, skipping. Dependencies resolved. 
================================================================================ Package Arch Version Repository Size ================================================================================ Installing: ethtool x86_64 2:4.17-1.fc28 updates 144 k genisoimage x86_64 1.1.11-38.fc28 fedora 315 k libcgroup-tools x86_64 0.41-17.fc28 updates 90 k net-tools x86_64 2.0-0.50.20160912git.fc28 updates 320 k socat x86_64 1.7.3.2-6.fc28 fedora 297 k sudo x86_64 1.8.23-1.fc28 updates 864 k Installing dependencies: libusal x86_64 1.1.11-38.fc28 fedora 144 k Transaction Summary ================================================================================ Install 7 Packages Total download size: 2.1 M Installed size: 7.4 M Downloading Packages: (1/7): libusal-1.1.11-38.fc28.x86_64.rpm 300 kB/s | 144 kB 00:00 (2/7): socat-1.7.3.2-6.fc28.x86_64.rpm 495 kB/s | 297 kB 00:00 (3/7): genisoimage-1.1.11-38.fc28.x86_64.rpm 485 kB/s | 315 kB 00:00 (4/7): libcgroup-tools-0.41-17.fc28.x86_64.rpm 133 kB/s | 90 kB 00:00 (5/7): ethtool-4.17-1.fc28.x86_64.rpm 251 kB/s | 144 kB 00:00 (6/7): net-tools-2.0-0.50.20160912git.fc28.x86_ 464 kB/s | 320 kB 00:00 (7/7): sudo-1.8.23-1.fc28.x86_64.rpm 1.1 MB/s | 864 kB 00:00 -------------------------------------------------------------------------------- Total 684 kB/s | 2.1 MB 00:03 Running transaction check Transaction check succeeded. Running transaction test Transaction test succeeded. Running transaction Preparing : 1/1 Installing : libusal-1.1.11-38.fc28.x86_64 1/7 Running scriptlet: libusal-1.1.11-38.fc28.x86_64 1/7 Installing : genisoimage-1.1.11-38.fc28.x86_64 2/7 Running scriptlet: genisoimage-1.1.11-38.fc28.x86_64 2/7 Installing : sudo-1.8.23-1.fc28.x86_64 3/7 Running scriptlet: sudo-1.8.23-1.fc28.x86_64 3/7 Installing : net-tools-2.0-0.50.20160912git.fc28.x86_64 4/7 Running scriptlet: net-tools-2.0-0.50.20160912git.fc28.x86_64 4/7 Installing : ethtool-2:4.17-1.fc28.x86_64 5/7 Installing : libcgroup-tools-0.41-17.fc28.x86_64 6/7 Running scriptlet: libcgroup-tools-0.41-17.fc28.x86_64 6/7 Installing : socat-1.7.3.2-6.fc28.x86_64 7/7 Running scriptlet: socat-1.7.3.2-6.fc28.x86_64 7/7 Verifying : socat-1.7.3.2-6.fc28.x86_64 1/7 Verifying : genisoimage-1.1.11-38.fc28.x86_64 2/7 Verifying : libusal-1.1.11-38.fc28.x86_64 3/7 Verifying : libcgroup-tools-0.41-17.fc28.x86_64 4/7 Verifying : ethtool-2:4.17-1.fc28.x86_64 5/7 Verifying : net-tools-2.0-0.50.20160912git.fc28.x86_64 6/7 Verifying : sudo-1.8.23-1.fc28.x86_64 7/7 Installed: ethtool.x86_64 2:4.17-1.fc28 genisoimage.x86_64 1.1.11-38.fc28 libcgroup-tools.x86_64 0.41-17.fc28 net-tools.x86_64 2.0-0.50.20160912git.fc28 socat.x86_64 1.7.3.2-6.fc28 sudo.x86_64 1.8.23-1.fc28 libusal.x86_64 1.1.11-38.fc28 Complete! 
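# Step 3/10 of the virt-launcher build above chains three things into one layer:
# install the runtime tools, `dnf -y clean all` so the metadata just fetched never
# bloats the image, and `test $(id -u qemu) = 107` so the build fails fast if the
# base image ever ships the qemu user under a different UID than virt-launcher
# expects. A sketch of re-checking that pin out of band (assumes the image permits
# an arbitrary command override):
docker run --rm docker.io/kubevirt/libvirt:4.2.0 id -u qemu    # expected output: 107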
23 files removed
 ---> fc9481693838
Removing intermediate container da370f63dfa8
Step 4/10 : COPY virt-launcher /usr/bin/virt-launcher
 ---> 0cf2a6b14089
Removing intermediate container 8b61692e4ca6
Step 5/10 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt
 ---> 723edf6ad697
Removing intermediate container d2a0e2bfac47
Step 6/10 : RUN setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64
 ---> Running in 5b9d28339477
 ---> 7777c81d751e
Removing intermediate container 5b9d28339477
Step 7/10 : RUN mkdir -p /usr/share/kubevirt/virt-launcher
 ---> Running in 76acafbac59c
 ---> 3c0e5933debd
Removing intermediate container 76acafbac59c
Step 8/10 : COPY entrypoint.sh libvirtd.sh sock-connector /usr/share/kubevirt/virt-launcher/
 ---> 5def4310fb4c
Removing intermediate container 221e706ea0a1
Step 9/10 : ENTRYPOINT /usr/share/kubevirt/virt-launcher/entrypoint.sh
 ---> Running in 095d685141f1
 ---> 306f0662dfce
Removing intermediate container 095d685141f1
Step 10/10 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-launcher" ''
 ---> Running in afe2bb400b46
 ---> 9f95aac414eb
Removing intermediate container afe2bb400b46
Successfully built 9f95aac414eb
Sending build context to Docker daemon 38.76 MB
Step 1/8 : FROM fedora:28
 ---> cc510acfcd70
Step 2/8 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> 84920e004a40
Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api
 ---> Running in bea53b9d7c1b
 ---> 3cff23506e80
Removing intermediate container bea53b9d7c1b
Step 4/8 : WORKDIR /home/virt-api
 ---> e94c5606b96b
Removing intermediate container f6e20ab30236
Step 5/8 : USER 1001
 ---> Running in 8e97a7200224
 ---> af16317199f5
Removing intermediate container 8e97a7200224
Step 6/8 : COPY virt-api /usr/bin/virt-api
 ---> 00e02711adcd
Removing intermediate container b5398de481d1
Step 7/8 : ENTRYPOINT /usr/bin/virt-api
 ---> Running in 336c6775880a
 ---> 85be9c72a790
Removing intermediate container 336c6775880a
Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-api" ''
 ---> Running in 94d78be93ee0
 ---> 1111cdab4474
Removing intermediate container 94d78be93ee0
Successfully built 1111cdab4474
Sending build context to Docker daemon 41.66 MB
Step 1/5 : FROM fedora:28
 ---> cc510acfcd70
Step 2/5 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> 84920e004a40
Step 3/5 : COPY virt-handler /usr/bin/virt-handler
 ---> e065c03b2915
Removing intermediate container aca95d5d1c8e
Step 4/5 : ENTRYPOINT /usr/bin/virt-handler
 ---> Running in c75424983055
 ---> 5879bf5e3066
Removing intermediate container c75424983055
Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-handler" ''
 ---> Running in 910751c186f2
 ---> f38b597346d6
Removing intermediate container 910751c186f2
Successfully built f38b597346d6
Sending build context to Docker daemon 4.096 kB
Step 1/7 : FROM fedora:28
 ---> cc510acfcd70
Step 2/7 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> 84920e004a40
Step 3/7 : ENV container docker
 ---> Running in 56e548fdd027
 ---> aed3ca4ac3a3
Removing intermediate container 56e548fdd027
Step 4/7 : RUN mkdir -p /images/custom /images/alpine && truncate -s 64M /images/custom/disk.img && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/alpine/disk.img
 ---> Running in f695ee32be9d
[curl progress meter trimmed: 37.0M fetched in 0:00:03, avg 9667k/s]
 ---> c7d0cf8fc982
Removing intermediate container f695ee32be9d
Step 5/7 : ADD entrypoint.sh /
 ---> 0393e5ee0c37
Removing intermediate container a4f7f810c739
Step 6/7 : CMD /entrypoint.sh
 ---> Running in 82ced3c5b9f0
 ---> 23798f49dea3
Removing intermediate container 82ced3c5b9f0
Step 7/7 : LABEL "disks-images-provider" '' "kubevirt-functional-tests-openshift-3.10-release1" ''
 ---> Running in 2e5cfa97322f
 ---> 628bfca144bf
Removing intermediate container 2e5cfa97322f
Successfully built 628bfca144bf
Sending build context to Docker daemon 2.56 kB
Step 1/5 : FROM fedora:28
 ---> cc510acfcd70
Step 2/5 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> 84920e004a40
Step 3/5 : ENV container docker
 ---> Using cache
 ---> aed3ca4ac3a3
Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all
 ---> Running in 014e448ab447
Fedora 28 - x86_64 - Updates                    697 kB/s |  20 MB     00:29
Fedora 28 - x86_64                              6.1 MB/s |  60 MB     00:09
Last metadata expiration check: 0:00:32 ago on Thu Jul 26 07:29:40 2018.
Dependencies resolved.
================================================================================
 Package          Arch         Version                 Repository         Size
================================================================================
Installing:
 nmap-ncat        x86_64       2:7.60-12.fc28          fedora            235 k
 procps-ng        x86_64       3.3.12-3.fc28           updates           404 k

Transaction Summary
================================================================================
Install  2 Packages

Total download size: 638 k
Installed size: 1.4 M
Downloading Packages:
(1/2): procps-ng-3.3.12-3.fc28.x86_64.rpm       1.5 MB/s | 404 kB     00:00
(2/2): nmap-ncat-7.60-12.fc28.x86_64.rpm        277 kB/s | 235 kB     00:00
--------------------------------------------------------------------------------
Total                                           453 kB/s | 638 kB     00:01
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
  Preparing        :                                                        1/1
  Installing       : procps-ng-3.3.12-3.fc28.x86_64                         1/2
  Installing       : nmap-ncat-2:7.60-12.fc28.x86_64                        2/2
  Running scriptlet: nmap-ncat-2:7.60-12.fc28.x86_64                        2/2
  Verifying        : nmap-ncat-2:7.60-12.fc28.x86_64                        1/2
  Verifying        : procps-ng-3.3.12-3.fc28.x86_64                         2/2

Installed:
  nmap-ncat.x86_64 2:7.60-12.fc28        procps-ng.x86_64 3.3.12-3.fc28

Complete!
18 files removed
 ---> d8c990eaf575
Removing intermediate container 014e448ab447
Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "vm-killer" ''
 ---> Running in 3bcb0df1287d
 ---> 2ed275c4bfd0
Removing intermediate container 3bcb0df1287d
Successfully built 2ed275c4bfd0
Sending build context to Docker daemon 5.12 kB
Step 1/7 : FROM debian:sid
Trying to pull repository docker.io/library/debian ...
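# Step 6/10 of the virt-launcher build above attaches CAP_NET_BIND_SERVICE to the
# qemu binary as a file capability, so qemu can bind privileged (<1024) ports
# without running as root. The grant can be audited inside any container built
# from that image with getcap, which ships with the libcap tools (a sketch):
getcap /usr/bin/qemu-system-x86_64    # expect cap_net_bind_service in the +eip set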
sid: Pulling from docker.io/library/debian e367ac4072dc: Pulling fs layer e367ac4072dc: Verifying Checksum e367ac4072dc: Download complete e367ac4072dc: Pull complete Digest: sha256:dd291f3ce991dc1dabcaa29eafeab11cca9403fb0dfc7b7dcca7dcce6a48a97d Status: Downloaded newer image for docker.io/debian:sid ---> 68f33cf86aab Step 2/7 : MAINTAINER "David Vossel" \ ---> Running in ff31ff042f65 ---> 50fc79ebe51c Removing intermediate container ff31ff042f65 Step 3/7 : ENV container docker ---> Running in 2c40f3169caa ---> b8e063496923 Removing intermediate container 2c40f3169caa Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Running in 3b47f7fdc2b4 Get:1 http://cdn-fastly.deb.debian.org/debian sid InRelease [233 kB] Get:2 http://cdn-fastly.deb.debian.org/debian sid/main amd64 Packages [8114 kB] Fetched 8347 kB in 5s (1571 kB/s) Reading package lists... Reading package lists... Building dependency tree... Reading state information... bash is already the newest version (4.4.18-3.1). bash set to manually installed. The following additional packages will be installed: ca-certificates krb5-locales libaio1 libcurl4 libfreetype6 libglib2.0-0 libglib2.0-data libgraphite2-3 libgssapi-krb5-2 libharfbuzz0b libicu-le-hb0 libicu60 libk5crypto3 libkeyutils1 libkrb5-3 libkrb5support0 libldap-2.4-2 libldap-common libnghttp2-14 libpng16-16 libpsl5 librtmp1 libsasl2-2 libsasl2-modules libsasl2-modules-db libssh2-1 libssl1.1 libxml2 openssl publicsuffix shared-mime-info xdg-user-dirs Suggested packages: bzip2-doc krb5-doc krb5-user libsasl2-modules-gssapi-mit | libsasl2-modules-gssapi-heimdal libsasl2-modules-ldap libsasl2-modules-otp libsasl2-modules-sql debootstrap qemu-block-extra The following NEW packages will be installed: bzip2 ca-certificates curl krb5-locales libaio1 libcurl4 libfreetype6 libglib2.0-0 libglib2.0-data libgraphite2-3 libgssapi-krb5-2 libharfbuzz0b libicu-le-hb0 libicu60 libk5crypto3 libkeyutils1 libkrb5-3 libkrb5support0 libldap-2.4-2 libldap-common libnghttp2-14 libpng16-16 libpsl5 librtmp1 libsasl2-2 libsasl2-modules libsasl2-modules-db libssh2-1 libssl1.1 libxml2 openssl publicsuffix qemu-utils shared-mime-info xdg-user-dirs 0 upgraded, 35 newly installed, 0 to remove and 16 not upgraded. Need to get 22.7 MB of archives. After this operation, 75.2 MB of additional disk space will be used. 
Get:1 http://cdn-fastly.deb.debian.org/debian sid/main amd64 bzip2 amd64 1.0.6-8.1 [47.5 kB] Get:2 http://cdn-fastly.deb.debian.org/debian sid/main amd64 krb5-locales all 1.16-2 [94.7 kB] Get:3 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libssl1.1 amd64 1.1.0h-4 [1352 kB] Get:4 http://cdn-fastly.deb.debian.org/debian sid/main amd64 openssl amd64 1.1.0h-4 [744 kB] Get:5 http://cdn-fastly.deb.debian.org/debian sid/main amd64 ca-certificates all 20180409 [161 kB] Get:6 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libkeyutils1 amd64 1.5.9-9.3 [13.0 kB] Get:7 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libkrb5support0 amd64 1.16-2 [62.8 kB] Get:8 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libk5crypto3 amd64 1.16-2 [121 kB] Get:9 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libkrb5-3 amd64 1.16-2 [316 kB] Get:10 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libgssapi-krb5-2 amd64 1.16-2 [158 kB] Get:11 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libsasl2-modules-db amd64 2.1.27~101-g0780600+dfsg-3.1 [68.6 kB] Get:12 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libsasl2-2 amd64 2.1.27~101-g0780600+dfsg-3.1 [105 kB] Get:13 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libldap-common all 2.4.46+dfsg-5 [88.3 kB] Get:14 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libldap-2.4-2 amd64 2.4.46+dfsg-5 [223 kB] Get:15 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libnghttp2-14 amd64 1.32.0-1 [83.0 kB] Get:16 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libpsl5 amd64 0.20.2-1 [53.7 kB] Get:17 http://cdn-fastly.deb.debian.org/debian sid/main amd64 librtmp1 amd64 2.4+20151223.gitfa8646d.1-2 [60.5 kB] Get:18 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libssh2-1 amd64 1.8.0-2 [138 kB] Get:19 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libcurl4 amd64 7.60.0-2 [316 kB] Get:20 http://cdn-fastly.deb.debian.org/debian sid/main amd64 curl amd64 7.60.0-2 [254 kB] Get:21 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libaio1 amd64 0.3.111-1 [10.5 kB] Get:22 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libpng16-16 amd64 1.6.34-2 [288 kB] Get:23 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libfreetype6 amd64 2.8.1-2 [461 kB] Get:24 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libglib2.0-0 amd64 2.56.1-2 [2928 kB] Get:25 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libglib2.0-data all 2.56.1-2 [2671 kB] Get:26 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libgraphite2-3 amd64 1.3.11-2 [85.2 kB] Get:27 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libharfbuzz0b amd64 1.8.4-1 [875 kB] Get:28 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libicu60 amd64 60.2-6 [8073 kB] Get:29 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libicu-le-hb0 amd64 1.0.3+git161113-5 [14.6 kB] Get:30 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libsasl2-modules amd64 2.1.27~101-g0780600+dfsg-3.1 [103 kB] Get:31 http://cdn-fastly.deb.debian.org/debian sid/main amd64 libxml2 amd64 2.9.4+dfsg1-7+b1 [725 kB] Get:32 http://cdn-fastly.deb.debian.org/debian sid/main amd64 publicsuffix all 20180523.2326-2 [112 kB] Get:33 http://cdn-fastly.deb.debian.org/debian sid/main amd64 qemu-utils amd64 1:2.12+dfsg-3 [1041 kB] Get:34 http://cdn-fastly.deb.debian.org/debian sid/main amd64 shared-mime-info amd64 1.9-2 [753 kB] Get:35 http://cdn-fastly.deb.debian.org/debian sid/main amd64 xdg-user-dirs amd64 0.17-1 [53.5 kB] debconf: 
delaying package configuration, since apt-utils is not installed
Fetched 22.7 MB in 4s (5487 kB/s)
Selecting previously unselected package bzip2.
(Reading database ... 6571 files and directories currently installed.)
Preparing to unpack .../00-bzip2_1.0.6-8.1_amd64.deb ...
Unpacking bzip2 (1.0.6-8.1) ...
Selecting previously unselected package krb5-locales.
Preparing to unpack .../01-krb5-locales_1.16-2_all.deb ...
Unpacking krb5-locales (1.16-2) ...
Selecting previously unselected package libssl1.1:amd64.
Preparing to unpack .../02-libssl1.1_1.1.0h-4_amd64.deb ...
Unpacking libssl1.1:amd64 (1.1.0h-4) ...
Selecting previously unselected package openssl.
Preparing to unpack .../03-openssl_1.1.0h-4_amd64.deb ...
Unpacking openssl (1.1.0h-4) ...
Selecting previously unselected package ca-certificates.
Preparing to unpack .../04-ca-certificates_20180409_all.deb ...
Unpacking ca-certificates (20180409) ...
Selecting previously unselected package libkeyutils1:amd64.
Preparing to unpack .../05-libkeyutils1_1.5.9-9.3_amd64.deb ...
Unpacking libkeyutils1:amd64 (1.5.9-9.3) ...
Selecting previously unselected package libkrb5support0:amd64.
Preparing to unpack .../06-libkrb5support0_1.16-2_amd64.deb ...
Unpacking libkrb5support0:amd64 (1.16-2) ...
Selecting previously unselected package libk5crypto3:amd64.
Preparing to unpack .../07-libk5crypto3_1.16-2_amd64.deb ...
Unpacking libk5crypto3:amd64 (1.16-2) ...
Selecting previously unselected package libkrb5-3:amd64.
Preparing to unpack .../08-libkrb5-3_1.16-2_amd64.deb ...
Unpacking libkrb5-3:amd64 (1.16-2) ...
Selecting previously unselected package libgssapi-krb5-2:amd64.
Preparing to unpack .../09-libgssapi-krb5-2_1.16-2_amd64.deb ...
Unpacking libgssapi-krb5-2:amd64 (1.16-2) ...
Selecting previously unselected package libsasl2-modules-db:amd64.
Preparing to unpack .../10-libsasl2-modules-db_2.1.27~101-g0780600+dfsg-3.1_amd64.deb ...
Unpacking libsasl2-modules-db:amd64 (2.1.27~101-g0780600+dfsg-3.1) ...
Selecting previously unselected package libsasl2-2:amd64.
Preparing to unpack .../11-libsasl2-2_2.1.27~101-g0780600+dfsg-3.1_amd64.deb ...
Unpacking libsasl2-2:amd64 (2.1.27~101-g0780600+dfsg-3.1) ...
Selecting previously unselected package libldap-common.
Preparing to unpack .../12-libldap-common_2.4.46+dfsg-5_all.deb ...
Unpacking libldap-common (2.4.46+dfsg-5) ...
Selecting previously unselected package libldap-2.4-2:amd64.
Preparing to unpack .../13-libldap-2.4-2_2.4.46+dfsg-5_amd64.deb ...
Unpacking libldap-2.4-2:amd64 (2.4.46+dfsg-5) ...
Selecting previously unselected package libnghttp2-14:amd64.
Preparing to unpack .../14-libnghttp2-14_1.32.0-1_amd64.deb ...
Unpacking libnghttp2-14:amd64 (1.32.0-1) ...
Selecting previously unselected package libpsl5:amd64.
Preparing to unpack .../15-libpsl5_0.20.2-1_amd64.deb ...
Unpacking libpsl5:amd64 (0.20.2-1) ...
Selecting previously unselected package librtmp1:amd64.
Preparing to unpack .../16-librtmp1_2.4+20151223.gitfa8646d.1-2_amd64.deb ...
Unpacking librtmp1:amd64 (2.4+20151223.gitfa8646d.1-2) ... Selecting previously unselected package libssh2-1:amd64. Preparing to unpack .../17-libssh2-1_1.8.0-2_amd64.deb ... Unpacking libssh2-1:amd64 (1.8.0-2) ... Selecting previously unselected package libcurl4:amd64. Preparing to unpack .../18-libcurl4_7.60.0-2_amd64.deb ... Unpacking libcurl4:amd64 (7.60.0-2) ... Selecting previously unselected package curl. Preparing to unpack .../19-curl_7.60.0-2_amd64.deb ... Unpacking curl (7.60.0-2) ... Selecting previously unselected package libaio1:amd64. Preparing to unpack .../20-libaio1_0.3.111-1_amd64.deb ... Unpacking libaio1:amd64 (0.3.111-1) ... Selecting previously unselected package libpng16-16:amd64. Preparing to unpack .../21-libpng16-16_1.6.34-2_amd64.deb ... Unpacking libpng16-16:amd64 (1.6.34-2) ... Selecting previously unselected package libfreetype6:amd64. Preparing to unpack .../22-libfreetype6_2.8.1-2_amd64.deb ... Unpacking libfreetype6:amd64 (2.8.1-2) ... Selecting previously unselected package libglib2.0-0:amd64. Preparing to unpack .../23-libglib2.0-0_2.56.1-2_amd64.deb ... Unpacking libglib2.0-0:amd64 (2.56.1-2) ... Selecting previously unselected package libglib2.0-data. Preparing to unpack .../24-libglib2.0-data_2.56.1-2_all.deb ... Unpacking libglib2.0-data (2.56.1-2) ... Selecting previously unselected package libgraphite2-3:amd64. Preparing to unpack .../25-libgraphite2-3_1.3.11-2_amd64.deb ... Unpacking libgraphite2-3:amd64 (1.3.11-2) ... Selecting previously unselected package libharfbuzz0b:amd64. Preparing to unpack .../26-libharfbuzz0b_1.8.4-1_amd64.deb ... Unpacking libharfbuzz0b:amd64 (1.8.4-1) ... Selecting previously unselected package libicu60:amd64. Preparing to unpack .../27-libicu60_60.2-6_amd64.deb ... Unpacking libicu60:amd64 (60.2-6) ... Selecting previously unselected package libicu-le-hb0:amd64. Preparing to unpack .../28-libicu-le-hb0_1.0.3+git161113-5_amd64.deb ... Unpacking libicu-le-hb0:amd64 (1.0.3+git161113-5) ... Selecting previously unselected package libsasl2-modules:amd64. Preparing to unpack .../29-libsasl2-modules_2.1.27~101-g0780600+dfsg-3.1_amd64.deb ... Unpacking libsasl2-modules:amd64 (2.1.27~101-g0780600+dfsg-3.1) ... Selecting previously unselected package libxml2:amd64. Preparing to unpack .../30-libxml2_2.9.4+dfsg1-7+b1_amd64.deb ... Unpacking libxml2:amd64 (2.9.4+dfsg1-7+b1) ... Selecting previously unselected package publicsuffix. Preparing to unpack .../31-publicsuffix_20180523.2326-2_all.deb ... Unpacking publicsuffix (20180523.2326-2) ... Selecting previously unselected package qemu-utils. Preparing to unpack .../32-qemu-utils_1%3a2.12+dfsg-3_amd64.deb ... Unpacking qemu-utils (1:2.12+dfsg-3) ... Selecting previously unselected package shared-mime-info. Preparing to unpack .../33-shared-mime-info_1.9-2_amd64.deb ... Unpacking shared-mime-info (1.9-2) ... Selecting previously unselected package xdg-user-dirs. Preparing to unpack .../34-xdg-user-dirs_0.17-1_amd64.deb ... Unpacking xdg-user-dirs (0.17-1) ... Setting up libnghttp2-14:amd64 (1.32.0-1) ... Setting up libpng16-16:amd64 (1.6.34-2) ... Setting up libldap-common (2.4.46+dfsg-5) ... Setting up libpsl5:amd64 (0.20.2-1) ... Setting up libglib2.0-0:amd64 (2.56.1-2) ... No schema files found: doing nothing. Setting up libsasl2-modules-db:amd64 (2.1.27~101-g0780600+dfsg-3.1) ... Setting up libsasl2-2:amd64 (2.1.27~101-g0780600+dfsg-3.1) ... Setting up librtmp1:amd64 (2.4+20151223.gitfa8646d.1-2) ... Setting up libfreetype6:amd64 (2.8.1-2) ... Setting up bzip2 (1.0.6-8.1) ... 
Setting up libgraphite2-3:amd64 (1.3.11-2) ... Setting up libssh2-1:amd64 (1.8.0-2) ... Setting up libglib2.0-data (2.56.1-2) ... Setting up krb5-locales (1.16-2) ... Processing triggers for libc-bin (2.27-4) ... Setting up libaio1:amd64 (0.3.111-1) ... Setting up publicsuffix (20180523.2326-2) ... Setting up libldap-2.4-2:amd64 (2.4.46+dfsg-5) ... Setting up libssl1.1:amd64 (1.1.0h-4) ... debconf: unable to initialize frontend: Dialog debconf: (TERM is not set, so the dialog frontend is not usable.) debconf: falling back to frontend: Readline debconf: unable to initialize frontend: Readline debconf: (Can't locate Term/ReadLine.pm in @INC (you may need to install the Term::ReadLine module) (@INC contains: /etc/perl /usr/local/lib/x86_64-linux-gnu/perl/5.26.2 /usr/local/share/perl/5.26.2 /usr/lib/x86_64-linux-gnu/perl5/5.26 /usr/share/perl5 /usr/lib/x86_64-linux-gnu/perl/5.26 /usr/share/perl/5.26 /usr/local/lib/site_perl /usr/lib/x86_64-linux-gnu/perl-base) at /usr/share/perl5/Debconf/FrontEnd/Readline.pm line 7.) debconf: falling back to frontend: Teletype Setting up openssl (1.1.0h-4) ... Setting up libkeyutils1:amd64 (1.5.9-9.3) ... Setting up libsasl2-modules:amd64 (2.1.27~101-g0780600+dfsg-3.1) ... Setting up ca-certificates (20180409) ... debconf: unable to initialize frontend: Dialog debconf: (TERM is not set, so the dialog frontend is not usable.) debconf: falling back to frontend: Readline debconf: unable to initialize frontend: Readline debconf: (Can't locate Term/ReadLine.pm in @INC (you may need to install the Term::ReadLine module) (@INC contains: /etc/perl /usr/local/lib/x86_64-linux-gnu/perl/5.26.2 /usr/local/share/perl/5.26.2 /usr/lib/x86_64-linux-gnu/perl5/5.26 /usr/share/perl5 /usr/lib/x86_64-linux-gnu/perl/5.26 /usr/share/perl/5.26 /usr/local/lib/site_perl /usr/lib/x86_64-linux-gnu/perl-base) at /usr/share/perl5/Debconf/FrontEnd/Readline.pm line 7.) debconf: falling back to frontend: Teletype Updating certificates in /etc/ssl/certs... 133 added, 0 removed; done. Setting up xdg-user-dirs (0.17-1) ... Setting up libharfbuzz0b:amd64 (1.8.4-1) ... Setting up libkrb5support0:amd64 (1.16-2) ... Setting up qemu-utils (1:2.12+dfsg-3) ... Setting up libk5crypto3:amd64 (1.16-2) ... Setting up libkrb5-3:amd64 (1.16-2) ... Setting up libgssapi-krb5-2:amd64 (1.16-2) ... Setting up libcurl4:amd64 (7.60.0-2) ... Setting up curl (7.60.0-2) ... Setting up libicu-le-hb0:amd64 (1.0.3+git161113-5) ... Setting up libicu60:amd64 (60.2-6) ... Setting up libxml2:amd64 (2.9.4+dfsg1-7+b1) ... Setting up shared-mime-info (1.9-2) ... Processing triggers for libc-bin (2.27-4) ... Processing triggers for ca-certificates (20180409) ... Updating certificates in /etc/ssl/certs... 0 added, 0 removed; done. Running hooks in /etc/ca-certificates/update.d... done. 
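# Step 4/7 of this debian:sid image follows the usual apt hygiene pattern: update,
# install, and delete /var/lib/apt/lists/* in the same layer so the package indexes
# never land in the final image. The debconf "unable to initialize frontend"
# messages above are harmless fallbacks in a non-interactive build; a common way to
# silence them (an assumption, not something this Dockerfile does) is to preselect
# the frontend:
export DEBIAN_FRONTEND=noninteractive    # or ENV/ARG DEBIAN_FRONTEND=noninteractive in the Dockerfile
apt-get update && apt-get install -y bash curl bzip2 qemu-utils && rm -rf /var/lib/apt/lists/*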
 ---> 8adb1572b35c
Removing intermediate container 3b47f7fdc2b4
Step 5/7 : ADD entry-point.sh /
 ---> 8c0c5a52e4df
Removing intermediate container 2c1083ae8d3d
Step 6/7 : CMD /entry-point.sh
 ---> Running in 74c076989704
 ---> 1a4b838e5dee
Removing intermediate container 74c076989704
Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "registry-disk-v1alpha" ''
 ---> Running in 71a6b1b70650
 ---> 7aa3fd44f8c9
Removing intermediate container 71a6b1b70650
Successfully built 7aa3fd44f8c9
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:32816/kubevirt/registry-disk-v1alpha:devel
 ---> 7aa3fd44f8c9
Step 2/4 : MAINTAINER "David Vossel" \
 ---> Running in 958eb1e56f7a
 ---> 5e0c3d37503b
Removing intermediate container 958eb1e56f7a
Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img
 ---> Running in f72ff7158990
[curl progress meter trimmed: 12.1M fetched in 0:00:01, avg 8863k/s]
 ---> 2acb8de4d71e
Removing intermediate container f72ff7158990
Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" ''
 ---> Running in df9c7a39f2a7
 ---> 89f88bb54bf2
Removing intermediate container df9c7a39f2a7
Successfully built 89f88bb54bf2
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:32816/kubevirt/registry-disk-v1alpha:devel
 ---> 7aa3fd44f8c9
Step 2/4 : MAINTAINER "The KubeVirt Project"
 ---> Running in 58ca91486e1c
 ---> 776bfb123af4
Removing intermediate container 58ca91486e1c
Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2
 ---> Running in 18e26f38cdbb
[curl progress meter trimmed: 221M fetched in 0:00:26, avg 8601k/s]
 ---> 288211d2b493
Removing intermediate container 18e26f38cdbb
Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" ''
 ---> Running in 19df221e37da
 ---> 0912477735f2
Removing intermediate container 19df221e37da
Successfully built 0912477735f2
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:32816/kubevirt/registry-disk-v1alpha:devel
 ---> 7aa3fd44f8c9
Step 2/4 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> 776bfb123af4
Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso
 ---> Running in 5c5e57110755
[curl progress meter trimmed: 37.0M fetched in 0:00:02, avg 15.2M/s]
 ---> c0c8be599bed
Removing intermediate container 5c5e57110755
Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" ''
 ---> Running in e05367cf2952
 ---> f4b34e404811
Removing intermediate container e05367cf2952
Successfully built f4b34e404811
Sending build context to Docker daemon 35.57 MB
Step 1/8 : FROM fedora:28
 ---> cc510acfcd70
Step 2/8 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> 84920e004a40
Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl
 ---> Running in 9943b9fe139c
 ---> d74088d7a4fc
Removing intermediate container 9943b9fe139c
Step 4/8 : WORKDIR /home/virtctl
 ---> c8c857bf8d96
Removing intermediate container 72a3679d6fe7
Step 5/8 : USER 1001
 ---> Running in 3f21816457d6
 ---> 36730a67b946
Removing intermediate container 3f21816457d6
Step 6/8 : COPY subresource-access-test /subresource-access-test
 ---> 8f6ad085e234
Removing intermediate container 0cad2b32e13f
Step 7/8 : ENTRYPOINT /subresource-access-test
 ---> Running in 3c66dd7ab995
 ---> 3b54193ee4e7
Removing intermediate container 3c66dd7ab995
Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "subresource-access-test" ''
 ---> Running in 50ca3c2fcafb
 ---> b639ec725b32
Removing intermediate container 50ca3c2fcafb
Successfully built b639ec725b32
Sending build context to Docker daemon 3.072 kB
Step 1/9 : FROM fedora:28
 ---> cc510acfcd70
Step 2/9 : MAINTAINER "The KubeVirt Project"
 ---> Using cache
 ---> 84920e004a40
Step 3/9 : ENV container docker
 ---> Using cache
 ---> aed3ca4ac3a3
Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all
 ---> Running in 02a8f5f5b75f
Fedora 28 - x86_64 - Updates                    4.1 MB/s |  20 MB     00:04
Fedora 28 - x86_64                               34 MB/s |  60 MB     00:01
Last metadata expiration check: 0:00:10 ago on Thu Jul 26 07:31:51 2018.
Dependencies resolved.
================================================================================ Package Arch Version Repository Size ================================================================================ Installing: gcc x86_64 8.1.1-5.fc28 updates 23 M git x86_64 2.17.1-3.fc28 updates 221 k make x86_64 1:4.2.1-6.fc28 fedora 497 k Upgrading: libgcc x86_64 8.1.1-5.fc28 updates 95 k Installing dependencies: binutils x86_64 2.29.1-23.fc28 updates 6.0 M cpp x86_64 8.1.1-5.fc28 updates 10 M emacs-filesystem noarch 1:26.1-3.fc28 updates 68 k fipscheck x86_64 1.5.0-4.fc28 fedora 26 k fipscheck-lib x86_64 1.5.0-4.fc28 fedora 14 k gc x86_64 7.6.4-3.fc28 fedora 108 k git-core x86_64 2.17.1-3.fc28 updates 4.0 M git-core-doc noarch 2.17.1-3.fc28 updates 2.3 M glibc-devel x86_64 2.27-8.fc28 fedora 1.0 M glibc-headers x86_64 2.27-8.fc28 fedora 454 k groff-base x86_64 1.22.3-15.fc28 fedora 1.0 M guile x86_64 5:2.0.14-7.fc28 fedora 3.5 M isl x86_64 0.16.1-6.fc28 fedora 841 k kernel-headers x86_64 4.17.7-200.fc28 updates 1.2 M less x86_64 530-1.fc28 fedora 163 k libatomic_ops x86_64 7.6.2-3.fc28 fedora 37 k libedit x86_64 3.1-23.20170329cvs.fc28 fedora 101 k libgomp x86_64 8.1.1-5.fc28 updates 207 k libmpc x86_64 1.0.2-9.fc28 fedora 58 k libpkgconf x86_64 1.4.2-1.fc28 fedora 34 k libsecret x86_64 0.18.6-1.fc28 fedora 162 k libstdc++ x86_64 8.1.1-5.fc28 updates 487 k libtool-ltdl x86_64 2.4.6-24.fc28 updates 57 k libxcrypt-devel x86_64 4.0.0-5.fc28 fedora 15 k ncurses x86_64 6.1-4.20180224.fc28 fedora 377 k openssh x86_64 7.7p1-5.fc28 updates 483 k openssh-clients x86_64 7.7p1-5.fc28 updates 684 k perl-Carp noarch 1.42-396.fc28 updates 29 k perl-Data-Dumper x86_64 2.167-399.fc28 fedora 57 k perl-Digest noarch 1.17-395.fc28 fedora 26 k perl-Digest-MD5 x86_64 2.55-396.fc28 fedora 36 k perl-Encode x86_64 4:2.97-3.fc28 fedora 1.5 M perl-Errno x86_64 1.28-412.fc28 updates 74 k perl-Error noarch 1:0.17025-2.fc28 fedora 45 k perl-Exporter noarch 5.72-396.fc28 fedora 33 k perl-File-Path noarch 2.15-2.fc28 fedora 37 k perl-File-Temp noarch 0.230.600-1.fc28 updates 62 k perl-Getopt-Long noarch 1:2.50-4.fc28 fedora 62 k perl-Git noarch 2.17.1-3.fc28 updates 73 k perl-HTTP-Tiny noarch 0.070-395.fc28 fedora 56 k perl-IO x86_64 1.38-412.fc28 updates 138 k perl-IO-Socket-IP noarch 0.39-5.fc28 fedora 46 k perl-MIME-Base64 x86_64 3.15-396.fc28 fedora 29 k perl-Net-SSLeay x86_64 1.85-1.fc28 fedora 356 k perl-PathTools x86_64 3.74-1.fc28 fedora 89 k perl-Pod-Escapes noarch 1:1.07-395.fc28 fedora 19 k perl-Pod-Perldoc noarch 3.28-396.fc28 fedora 87 k perl-Pod-Simple noarch 1:3.35-395.fc28 fedora 212 k perl-Pod-Usage noarch 4:1.69-395.fc28 fedora 33 k perl-Scalar-List-Utils x86_64 3:1.49-2.fc28 fedora 67 k perl-Socket x86_64 4:2.027-2.fc28 fedora 58 k perl-Storable x86_64 1:3.11-2.fc28 updates 97 k perl-Term-ANSIColor noarch 4.06-396.fc28 fedora 45 k perl-Term-Cap noarch 1.17-395.fc28 fedora 22 k perl-TermReadKey x86_64 2.37-7.fc28 fedora 39 k perl-Text-ParseWords noarch 3.30-395.fc28 fedora 17 k perl-Text-Tabs+Wrap noarch 2013.0523-395.fc28 fedora 23 k perl-Time-Local noarch 1:1.280-1.fc28 updates 32 k perl-URI noarch 1.73-2.fc28 fedora 115 k perl-Unicode-Normalize x86_64 1.25-396.fc28 fedora 81 k perl-constant noarch 1.33-396.fc28 fedora 24 k perl-interpreter x86_64 4:5.26.2-412.fc28 updates 6.2 M perl-libnet noarch 3.11-3.fc28 fedora 120 k perl-libs x86_64 4:5.26.2-412.fc28 updates 1.5 M perl-macros x86_64 4:5.26.2-412.fc28 updates 70 k perl-parent noarch 1:0.236-395.fc28 fedora 19 k perl-podlators noarch 4.11-1.fc28 updates 117 k 
perl-threads x86_64 1:2.21-2.fc28 fedora 60 k perl-threads-shared x86_64 1.58-2.fc28 fedora 46 k pkgconf x86_64 1.4.2-1.fc28 fedora 37 k pkgconf-m4 noarch 1.4.2-1.fc28 fedora 16 k pkgconf-pkg-config x86_64 1.4.2-1.fc28 fedora 14 k Installing weak dependencies: perl-IO-Socket-SSL noarch 2.056-1.fc28 fedora 285 k perl-Mozilla-CA noarch 20160104-7.fc28 fedora 14 k Transaction Summary ================================================================================ Install 77 Packages Upgrade 1 Package Total download size: 69 M Downloading Packages: (1/78): gc-7.6.4-3.fc28.x86_64.rpm 217 kB/s | 108 kB 00:00 (2/78): libatomic_ops-7.6.2-3.fc28.x86_64.rpm 228 kB/s | 37 kB 00:00 (3/78): make-4.2.1-6.fc28.x86_64.rpm 628 kB/s | 497 kB 00:00 (4/78): guile-2.0.14-7.fc28.x86_64.rpm 2.0 MB/s | 3.5 MB 00:01 (5/78): git-2.17.1-3.fc28.x86_64.rpm 151 kB/s | 221 kB 00:01 (6/78): perl-Git-2.17.1-3.fc28.noarch.rpm 1.6 MB/s | 73 kB 00:00 (7/78): git-core-doc-2.17.1-3.fc28.noarch.rpm 3.2 MB/s | 2.3 MB 00:00 (8/78): libsecret-0.18.6-1.fc28.x86_64.rpm 573 kB/s | 162 kB 00:00 (9/78): perl-Getopt-Long-2.50-4.fc28.noarch.rpm 343 kB/s | 62 kB 00:00 (10/78): perl-TermReadKey-2.37-7.fc28.x86_64.rp 431 kB/s | 39 kB 00:00 (11/78): perl-PathTools-3.74-1.fc28.x86_64.rpm 260 kB/s | 89 kB 00:00 (12/78): git-core-2.17.1-3.fc28.x86_64.rpm 1.9 MB/s | 4.0 MB 00:02 (13/78): perl-Error-0.17025-2.fc28.noarch.rpm 259 kB/s | 45 kB 00:00 (14/78): less-530-1.fc28.x86_64.rpm 639 kB/s | 163 kB 00:00 (15/78): perl-Exporter-5.72-396.fc28.noarch.rpm 181 kB/s | 33 kB 00:00 (16/78): perl-Text-ParseWords-3.30-395.fc28.noa 129 kB/s | 17 kB 00:00 (17/78): perl-Pod-Usage-1.69-395.fc28.noarch.rp 237 kB/s | 33 kB 00:00 (18/78): perl-constant-1.33-396.fc28.noarch.rpm 212 kB/s | 24 kB 00:00 (19/78): perl-Scalar-List-Utils-1.49-2.fc28.x86 438 kB/s | 67 kB 00:00 (20/78): perl-Pod-Perldoc-3.28-396.fc28.noarch. 415 kB/s | 87 kB 00:00 (21/78): perl-HTTP-Tiny-0.070-395.fc28.noarch.r 396 kB/s | 56 kB 00:00 (22/78): perl-parent-0.236-395.fc28.noarch.rpm 200 kB/s | 19 kB 00:00 (23/78): perl-MIME-Base64-3.15-396.fc28.x86_64. 386 kB/s | 29 kB 00:00 (24/78): perl-Pod-Simple-3.35-395.fc28.noarch.r 619 kB/s | 212 kB 00:00 (25/78): perl-Socket-2.027-2.fc28.x86_64.rpm 334 kB/s | 58 kB 00:00 (26/78): perl-Pod-Escapes-1.07-395.fc28.noarch. 
(27/78): perl-Text-Tabs+Wrap-2013.0523-395.fc28 262 kB/s | 23 kB 00:00
(28/78): groff-base-1.22.3-15.fc28.x86_64.rpm 936 kB/s | 1.0 MB 00:01
(29/78): isl-0.16.1-6.fc28.x86_64.rpm 968 kB/s | 841 kB 00:00
(30/78): cpp-8.1.1-5.fc28.x86_64.rpm 5.6 MB/s | 10 MB 00:01
(31/78): libmpc-1.0.2-9.fc28.x86_64.rpm 127 kB/s | 58 kB 00:00
(32/78): perl-libs-5.26.2-412.fc28.x86_64.rpm 2.8 MB/s | 1.5 MB 00:00
(33/78): perl-File-Path-2.15-2.fc28.noarch.rpm 205 kB/s | 37 kB 00:00
(34/78): perl-interpreter-5.26.2-412.fc28.x86_6 6.5 MB/s | 6.2 MB 00:00
(35/78): perl-Unicode-Normalize-1.25-396.fc28.x 263 kB/s | 81 kB 00:00
(36/78): perl-threads-2.21-2.fc28.x86_64.rpm 455 kB/s | 60 kB 00:00
(37/78): perl-Errno-1.28-412.fc28.x86_64.rpm 1.1 MB/s | 74 kB 00:00
(38/78): perl-threads-shared-1.58-2.fc28.x86_64 358 kB/s | 46 kB 00:00
(39/78): perl-Carp-1.42-396.fc28.noarch.rpm 459 kB/s | 29 kB 00:00
(40/78): perl-podlators-4.11-1.fc28.noarch.rpm 1.0 MB/s | 117 kB 00:00
(41/78): perl-Term-ANSIColor-4.06-396.fc28.noar 227 kB/s | 45 kB 00:00
(42/78): perl-Term-Cap-1.17-395.fc28.noarch.rpm 145 kB/s | 22 kB 00:00
(43/78): ncurses-6.1-4.20180224.fc28.x86_64.rpm 660 kB/s | 377 kB 00:00
(44/78): perl-File-Temp-0.230.600-1.fc28.noarch 600 kB/s | 62 kB 00:00
(45/78): perl-IO-1.38-412.fc28.x86_64.rpm 1.4 MB/s | 138 kB 00:00
(46/78): perl-Time-Local-1.280-1.fc28.noarch.rp 548 kB/s | 32 kB 00:00
(47/78): perl-Storable-3.11-2.fc28.x86_64.rpm 2.0 MB/s | 97 kB 00:00
(48/78): libtool-ltdl-2.4.6-24.fc28.x86_64.rpm 1.0 MB/s | 57 kB 00:00
(49/78): libstdc++-8.1.1-5.fc28.x86_64.rpm 4.0 MB/s | 487 kB 00:00
(50/78): perl-macros-5.26.2-412.fc28.x86_64.rpm 1.4 MB/s | 70 kB 00:00
(51/78): openssh-clients-7.7p1-5.fc28.x86_64.rp 4.5 MB/s | 684 kB 00:00
(52/78): perl-Encode-2.97-3.fc28.x86_64.rpm 1.1 MB/s | 1.5 MB 00:01
(53/78): openssh-7.7p1-5.fc28.x86_64.rpm 3.1 MB/s | 483 kB 00:00
(54/78): fipscheck-lib-1.5.0-4.fc28.x86_64.rpm 92 kB/s | 14 kB 00:00
(55/78): libedit-3.1-23.20170329cvs.fc28.x86_64 692 kB/s | 101 kB 00:00
(56/78): emacs-filesystem-26.1-3.fc28.noarch.rp 1.4 MB/s | 68 kB 00:00
(57/78): fipscheck-1.5.0-4.fc28.x86_64.rpm 232 kB/s | 26 kB 00:00
(58/78): gcc-8.1.1-5.fc28.x86_64.rpm 3.8 MB/s | 23 MB 00:05
(59/78): glibc-devel-2.27-8.fc28.x86_64.rpm 961 kB/s | 1.0 MB 00:01
(60/78): libxcrypt-devel-4.0.0-5.fc28.x86_64.rp 129 kB/s | 15 kB 00:00
(61/78): libgomp-8.1.1-5.fc28.x86_64.rpm 2.2 MB/s | 207 kB 00:00
(62/78): binutils-2.29.1-23.fc28.x86_64.rpm 4.4 MB/s | 6.0 MB 00:01
(63/78): glibc-headers-2.27-8.fc28.x86_64.rpm 1.5 MB/s | 454 kB 00:00
(64/78): pkgconf-pkg-config-1.4.2-1.fc28.x86_64 153 kB/s | 14 kB 00:00
(65/78): pkgconf-m4-1.4.2-1.fc28.noarch.rpm 160 kB/s | 16 kB 00:00
(66/78): libpkgconf-1.4.2-1.fc28.x86_64.rpm 185 kB/s | 34 kB 00:00
(67/78): pkgconf-1.4.2-1.fc28.x86_64.rpm 179 kB/s | 37 kB 00:00
(68/78): perl-Mozilla-CA-20160104-7.fc28.noarch 104 kB/s | 14 kB 00:00
(69/78): kernel-headers-4.17.7-200.fc28.x86_64. 3.8 MB/s | 1.2 MB 00:00
(70/78): perl-IO-Socket-IP-0.39-5.fc28.noarch.r 213 kB/s | 46 kB 00:00
(71/78): perl-IO-Socket-SSL-2.056-1.fc28.noarch 667 kB/s | 285 kB 00:00
(72/78): perl-Data-Dumper-2.167-399.fc28.x86_64 688 kB/s | 57 kB 00:00
(73/78): perl-libnet-3.11-3.fc28.noarch.rpm 892 kB/s | 120 kB 00:00
(74/78): perl-URI-1.73-2.fc28.noarch.rpm 383 kB/s | 115 kB 00:00
(75/78): perl-Digest-MD5-2.55-396.fc28.x86_64.r 471 kB/s | 36 kB 00:00
(76/78): perl-Digest-1.17-395.fc28.noarch.rpm 222 kB/s | 26 kB 00:00
(77/78): libgcc-8.1.1-5.fc28.x86_64.rpm 743 kB/s | 95 kB 00:00
(78/78): perl-Net-SSLeay-1.85-1.fc28.x86_64.rpm 573 kB/s | 356 kB 00:00
--------------------------------------------------------------------------------
Total 5.8 MB/s | 69 MB 00:11
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
  Preparing : 1/1
  Installing : perl-libs-4:5.26.2-412.fc28.x86_64 1/79
  Installing : perl-Carp-1.42-396.fc28.noarch 2/79
  Installing : perl-Exporter-5.72-396.fc28.noarch 3/79
  Installing : perl-Scalar-List-Utils-3:1.49-2.fc28.x86_64 4/79
  Upgrading : libgcc-8.1.1-5.fc28.x86_64 5/79
  Running scriptlet: libgcc-8.1.1-5.fc28.x86_64 5/79
  Installing : fipscheck-1.5.0-4.fc28.x86_64 6/79
  Installing : fipscheck-lib-1.5.0-4.fc28.x86_64 7/79
  Running scriptlet: fipscheck-lib-1.5.0-4.fc28.x86_64 7/79
  Installing : libstdc++-8.1.1-5.fc28.x86_64 8/79
  Running scriptlet: libstdc++-8.1.1-5.fc28.x86_64 8/79
  Installing : perl-Text-ParseWords-3.30-395.fc28.noarch 9/79
  Installing : libmpc-1.0.2-9.fc28.x86_64 10/79
  Running scriptlet: libmpc-1.0.2-9.fc28.x86_64 10/79
  Installing : cpp-8.1.1-5.fc28.x86_64 11/79
  Running scriptlet: cpp-8.1.1-5.fc28.x86_64 11/79
  Installing : groff-base-1.22.3-15.fc28.x86_64 12/79
  Running scriptlet: openssh-7.7p1-5.fc28.x86_64 13/79
  Installing : openssh-7.7p1-5.fc28.x86_64 13/79
  Installing : perl-Term-ANSIColor-4.06-396.fc28.noarch 14/79
  Installing : perl-macros-4:5.26.2-412.fc28.x86_64 15/79
  Installing : perl-constant-1.33-396.fc28.noarch 16/79
  Installing : perl-parent-1:0.236-395.fc28.noarch 17/79
  Installing : perl-Socket-4:2.027-2.fc28.x86_64 18/79
  Installing : perl-Text-Tabs+Wrap-2013.0523-395.fc28.noarch 19/79
  Installing : perl-File-Path-2.15-2.fc28.noarch 20/79
  Installing : perl-Unicode-Normalize-1.25-396.fc28.x86_64 21/79
  Installing : perl-threads-shared-1.58-2.fc28.x86_64 22/79
  Installing : perl-threads-1:2.21-2.fc28.x86_64 23/79
  Installing : perl-Errno-1.28-412.fc28.x86_64 24/79
  Installing : perl-PathTools-3.74-1.fc28.x86_64 25/79
  Installing : perl-interpreter-4:5.26.2-412.fc28.x86_64 26/79
  Installing : perl-IO-1.38-412.fc28.x86_64 27/79
  Installing : perl-MIME-Base64-3.15-396.fc28.x86_64 28/79
  Installing : perl-Time-Local-1:1.280-1.fc28.noarch 29/79
  Installing : perl-HTTP-Tiny-0.070-395.fc28.noarch 30/79
  Installing : perl-File-Temp-0.230.600-1.fc28.noarch 31/79
  Installing : perl-IO-Socket-IP-0.39-5.fc28.noarch 32/79
  Installing : perl-Net-SSLeay-1.85-1.fc28.x86_64 33/79
  Installing : perl-Digest-1.17-395.fc28.noarch 34/79
  Installing : perl-Digest-MD5-2.55-396.fc28.x86_64 35/79
  Installing : perl-libnet-3.11-3.fc28.noarch 36/79
  Installing : perl-Storable-1:3.11-2.fc28.x86_64 37/79
  Installing : perl-TermReadKey-2.37-7.fc28.x86_64 38/79
  Installing : perl-Error-1:0.17025-2.fc28.noarch 39/79
  Installing : perl-Pod-Escapes-1:1.07-395.fc28.noarch 40/79
  Installing : perl-Data-Dumper-2.167-399.fc28.x86_64 41/79
  Installing : kernel-headers-4.17.7-200.fc28.x86_64 42/79
  Running scriptlet: glibc-headers-2.27-8.fc28.x86_64 43/79
  Installing : glibc-headers-2.27-8.fc28.x86_64 43/79
  Installing : libpkgconf-1.4.2-1.fc28.x86_64 44/79
  Installing : pkgconf-1.4.2-1.fc28.x86_64 45/79
  Installing : pkgconf-m4-1.4.2-1.fc28.noarch 46/79
  Installing : pkgconf-pkg-config-1.4.2-1.fc28.x86_64 47/79
  Installing : libxcrypt-devel-4.0.0-5.fc28.x86_64 48/79
  Installing : glibc-devel-2.27-8.fc28.x86_64 49/79
  Running scriptlet: glibc-devel-2.27-8.fc28.x86_64 49/79
  Installing : libgomp-8.1.1-5.fc28.x86_64 50/79
  Running scriptlet: libgomp-8.1.1-5.fc28.x86_64 50/79
  Installing : binutils-2.29.1-23.fc28.x86_64 51/79
  Running scriptlet: binutils-2.29.1-23.fc28.x86_64 51/79
install-info: No such file or directory for /usr/share/info/as.info.gz
install-info: No such file or directory for /usr/share/info/binutils.info.gz
install-info: No such file or directory for /usr/share/info/gprof.info.gz
install-info: No such file or directory for /usr/share/info/ld.info.gz
  Installing : emacs-filesystem-1:26.1-3.fc28.noarch 52/79
  Installing : libedit-3.1-23.20170329cvs.fc28.x86_64 53/79
  Installing : openssh-clients-7.7p1-5.fc28.x86_64 54/79
  Installing : libtool-ltdl-2.4.6-24.fc28.x86_64 55/79
  Running scriptlet: libtool-ltdl-2.4.6-24.fc28.x86_64 55/79
  Installing : ncurses-6.1-4.20180224.fc28.x86_64 56/79
  Installing : perl-Term-Cap-1.17-395.fc28.noarch 57/79
  Installing : perl-Pod-Simple-1:3.35-395.fc28.noarch 58/79
  Installing : perl-Pod-Usage-4:1.69-395.fc28.noarch 59/79
  Installing : perl-Getopt-Long-1:2.50-4.fc28.noarch 60/79
  Installing : perl-Encode-4:2.97-3.fc28.x86_64 61/79
  Installing : perl-podlators-4.11-1.fc28.noarch 62/79
  Installing : perl-Pod-Perldoc-3.28-396.fc28.noarch 63/79
  Installing : perl-URI-1.73-2.fc28.noarch 64/79
  Installing : isl-0.16.1-6.fc28.x86_64 65/79
  Running scriptlet: isl-0.16.1-6.fc28.x86_64 65/79
  Installing : less-530-1.fc28.x86_64 66/79
  Installing : git-core-2.17.1-3.fc28.x86_64 67/79
  Installing : git-core-doc-2.17.1-3.fc28.noarch 68/79
  Installing : libsecret-0.18.6-1.fc28.x86_64 69/79
  Installing : perl-Git-2.17.1-3.fc28.noarch 70/79
  Installing : git-2.17.1-3.fc28.x86_64 71/79
  Installing : libatomic_ops-7.6.2-3.fc28.x86_64 72/79
  Installing : gc-7.6.4-3.fc28.x86_64 73/79
  Installing : guile-5:2.0.14-7.fc28.x86_64 74/79
  Running scriptlet: guile-5:2.0.14-7.fc28.x86_64 74/79
  Installing : make-1:4.2.1-6.fc28.x86_64 75/79
  Running scriptlet: make-1:4.2.1-6.fc28.x86_64 75/79
  Installing : gcc-8.1.1-5.fc28.x86_64 76/79
  Running scriptlet: gcc-8.1.1-5.fc28.x86_64 76/79
  Installing : perl-IO-Socket-SSL-2.056-1.fc28.noarch 77/79
  Installing : perl-Mozilla-CA-20160104-7.fc28.noarch 78/79
  Cleanup : libgcc-8.0.1-0.20.fc28.x86_64 79/79
  Running scriptlet: libgcc-8.0.1-0.20.fc28.x86_64 79/79
  Running scriptlet: guile-5:2.0.14-7.fc28.x86_64 79/79
  Running scriptlet: libgcc-8.0.1-0.20.fc28.x86_64 79/79
  Verifying : make-1:4.2.1-6.fc28.x86_64 1/79
  Verifying : gc-7.6.4-3.fc28.x86_64 2/79
  Verifying : guile-5:2.0.14-7.fc28.x86_64 3/79
  Verifying : libatomic_ops-7.6.2-3.fc28.x86_64 4/79
  Verifying : git-2.17.1-3.fc28.x86_64 5/79
  Verifying : git-core-2.17.1-3.fc28.x86_64 6/79
  Verifying : git-core-doc-2.17.1-3.fc28.noarch 7/79
  Verifying : perl-Git-2.17.1-3.fc28.noarch 8/79
  Verifying : libsecret-0.18.6-1.fc28.x86_64 9/79
  Verifying : perl-Getopt-Long-1:2.50-4.fc28.noarch 10/79
  Verifying : perl-PathTools-3.74-1.fc28.x86_64 11/79
  Verifying : perl-TermReadKey-2.37-7.fc28.x86_64 12/79
  Verifying : less-530-1.fc28.x86_64 13/79
  Verifying : perl-Error-1:0.17025-2.fc28.noarch 14/79
  Verifying : perl-Exporter-5.72-396.fc28.noarch 15/79
  Verifying : perl-Pod-Usage-4:1.69-395.fc28.noarch 16/79
  Verifying : perl-Text-ParseWords-3.30-395.fc28.noarch 17/79
  Verifying : perl-constant-1.33-396.fc28.noarch 18/79
  Verifying : perl-Scalar-List-Utils-3:1.49-2.fc28.x86_64 19/79
  Verifying : perl-Pod-Perldoc-3.28-396.fc28.noarch 20/79
  Verifying : groff-base-1.22.3-15.fc28.x86_64 21/79
  Verifying : perl-HTTP-Tiny-0.070-395.fc28.noarch 22/79
  Verifying : perl-Pod-Simple-1:3.35-395.fc28.noarch 23/79
  Verifying : perl-parent-1:0.236-395.fc28.noarch 24/79
  Verifying : perl-MIME-Base64-3.15-396.fc28.x86_64 25/79
  Verifying : perl-Socket-4:2.027-2.fc28.x86_64 26/79
  Verifying : perl-Pod-Escapes-1:1.07-395.fc28.noarch 27/79
  Verifying : perl-Text-Tabs+Wrap-2013.0523-395.fc28.noarch 28/79
  Verifying : gcc-8.1.1-5.fc28.x86_64 29/79
  Verifying : cpp-8.1.1-5.fc28.x86_64 30/79
  Verifying : isl-0.16.1-6.fc28.x86_64 31/79
  Verifying : libmpc-1.0.2-9.fc28.x86_64 32/79
  Verifying : perl-interpreter-4:5.26.2-412.fc28.x86_64 33/79
  Verifying : perl-libs-4:5.26.2-412.fc28.x86_64 34/79
  Verifying : perl-File-Path-2.15-2.fc28.noarch 35/79
  Verifying : perl-Unicode-Normalize-1.25-396.fc28.x86_64 36/79
  Verifying : perl-threads-1:2.21-2.fc28.x86_64 37/79
  Verifying : perl-threads-shared-1.58-2.fc28.x86_64 38/79
  Verifying : perl-Errno-1.28-412.fc28.x86_64 39/79
  Verifying : perl-Carp-1.42-396.fc28.noarch 40/79
  Verifying : perl-podlators-4.11-1.fc28.noarch 41/79
  Verifying : perl-Term-ANSIColor-4.06-396.fc28.noarch 42/79
  Verifying : perl-Term-Cap-1.17-395.fc28.noarch 43/79
  Verifying : ncurses-6.1-4.20180224.fc28.x86_64 44/79
  Verifying : perl-Encode-4:2.97-3.fc28.x86_64 45/79
  Verifying : perl-File-Temp-0.230.600-1.fc28.noarch 46/79
  Verifying : perl-IO-1.38-412.fc28.x86_64 47/79
  Verifying : perl-Time-Local-1:1.280-1.fc28.noarch 48/79
  Verifying : perl-Storable-1:3.11-2.fc28.x86_64 49/79
  Verifying : libtool-ltdl-2.4.6-24.fc28.x86_64 50/79
  Verifying : libstdc++-8.1.1-5.fc28.x86_64 51/79
  Verifying : perl-macros-4:5.26.2-412.fc28.x86_64 52/79
  Verifying : openssh-clients-7.7p1-5.fc28.x86_64 53/79
  Verifying : openssh-7.7p1-5.fc28.x86_64 54/79
  Verifying : fipscheck-lib-1.5.0-4.fc28.x86_64 55/79
  Verifying : libedit-3.1-23.20170329cvs.fc28.x86_64 56/79
  Verifying : fipscheck-1.5.0-4.fc28.x86_64 57/79
  Verifying : emacs-filesystem-1:26.1-3.fc28.noarch 58/79
  Verifying : binutils-2.29.1-23.fc28.x86_64 59/79
  Verifying : glibc-devel-2.27-8.fc28.x86_64 60/79
  Verifying : libxcrypt-devel-4.0.0-5.fc28.x86_64 61/79
  Verifying : glibc-headers-2.27-8.fc28.x86_64 62/79
  Verifying : libgomp-8.1.1-5.fc28.x86_64 63/79
  Verifying : pkgconf-pkg-config-1.4.2-1.fc28.x86_64 64/79
  Verifying : pkgconf-1.4.2-1.fc28.x86_64 65/79
  Verifying : pkgconf-m4-1.4.2-1.fc28.noarch 66/79
  Verifying : libpkgconf-1.4.2-1.fc28.x86_64 67/79
  Verifying : kernel-headers-4.17.7-200.fc28.x86_64 68/79
  Verifying : perl-Mozilla-CA-20160104-7.fc28.noarch 69/79
  Verifying : perl-IO-Socket-SSL-2.056-1.fc28.noarch 70/79
  Verifying : perl-IO-Socket-IP-0.39-5.fc28.noarch 71/79
  Verifying : perl-Net-SSLeay-1.85-1.fc28.x86_64 72/79
  Verifying : perl-URI-1.73-2.fc28.noarch 73/79
  Verifying : perl-Data-Dumper-2.167-399.fc28.x86_64 74/79
  Verifying : perl-libnet-3.11-3.fc28.noarch 75/79
  Verifying : perl-Digest-MD5-2.55-396.fc28.x86_64 76/79
  Verifying : perl-Digest-1.17-395.fc28.noarch 77/79
  Verifying : libgcc-8.1.1-5.fc28.x86_64 78/79
  Verifying : libgcc-8.0.1-0.20.fc28.x86_64 79/79

Installed:
  gcc.x86_64 8.1.1-5.fc28
  git.x86_64 2.17.1-3.fc28
  make.x86_64 1:4.2.1-6.fc28
  perl-IO-Socket-SSL.noarch 2.056-1.fc28
  perl-Mozilla-CA.noarch 20160104-7.fc28
  binutils.x86_64 2.29.1-23.fc28
  cpp.x86_64 8.1.1-5.fc28
  emacs-filesystem.noarch 1:26.1-3.fc28
  fipscheck.x86_64 1.5.0-4.fc28
  fipscheck-lib.x86_64 1.5.0-4.fc28
  gc.x86_64 7.6.4-3.fc28
  git-core.x86_64 2.17.1-3.fc28
  git-core-doc.noarch 2.17.1-3.fc28
  glibc-devel.x86_64 2.27-8.fc28
  glibc-headers.x86_64 2.27-8.fc28
  groff-base.x86_64 1.22.3-15.fc28
  guile.x86_64 5:2.0.14-7.fc28
  isl.x86_64 0.16.1-6.fc28
  kernel-headers.x86_64 4.17.7-200.fc28
  less.x86_64 530-1.fc28
  libatomic_ops.x86_64 7.6.2-3.fc28
  libedit.x86_64 3.1-23.20170329cvs.fc28
  libgomp.x86_64 8.1.1-5.fc28
  libmpc.x86_64 1.0.2-9.fc28
  libpkgconf.x86_64 1.4.2-1.fc28
  libsecret.x86_64 0.18.6-1.fc28
  libstdc++.x86_64 8.1.1-5.fc28
  libtool-ltdl.x86_64 2.4.6-24.fc28
  libxcrypt-devel.x86_64 4.0.0-5.fc28
  ncurses.x86_64 6.1-4.20180224.fc28
  openssh.x86_64 7.7p1-5.fc28
  openssh-clients.x86_64 7.7p1-5.fc28
  perl-Carp.noarch 1.42-396.fc28
  perl-Data-Dumper.x86_64 2.167-399.fc28
  perl-Digest.noarch 1.17-395.fc28
  perl-Digest-MD5.x86_64 2.55-396.fc28
  perl-Encode.x86_64 4:2.97-3.fc28
  perl-Errno.x86_64 1.28-412.fc28
  perl-Error.noarch 1:0.17025-2.fc28
  perl-Exporter.noarch 5.72-396.fc28
  perl-File-Path.noarch 2.15-2.fc28
  perl-File-Temp.noarch 0.230.600-1.fc28
  perl-Getopt-Long.noarch 1:2.50-4.fc28
  perl-Git.noarch 2.17.1-3.fc28
  perl-HTTP-Tiny.noarch 0.070-395.fc28
  perl-IO.x86_64 1.38-412.fc28
  perl-IO-Socket-IP.noarch 0.39-5.fc28
  perl-MIME-Base64.x86_64 3.15-396.fc28
  perl-Net-SSLeay.x86_64 1.85-1.fc28
  perl-PathTools.x86_64 3.74-1.fc28
  perl-Pod-Escapes.noarch 1:1.07-395.fc28
  perl-Pod-Perldoc.noarch 3.28-396.fc28
  perl-Pod-Simple.noarch 1:3.35-395.fc28
  perl-Pod-Usage.noarch 4:1.69-395.fc28
  perl-Scalar-List-Utils.x86_64 3:1.49-2.fc28
  perl-Socket.x86_64 4:2.027-2.fc28
  perl-Storable.x86_64 1:3.11-2.fc28
  perl-Term-ANSIColor.noarch 4.06-396.fc28
  perl-Term-Cap.noarch 1.17-395.fc28
  perl-TermReadKey.x86_64 2.37-7.fc28
  perl-Text-ParseWords.noarch 3.30-395.fc28
  perl-Text-Tabs+Wrap.noarch 2013.0523-395.fc28
  perl-Time-Local.noarch 1:1.280-1.fc28
  perl-URI.noarch 1.73-2.fc28
  perl-Unicode-Normalize.x86_64 1.25-396.fc28
  perl-constant.noarch 1.33-396.fc28
  perl-interpreter.x86_64 4:5.26.2-412.fc28
  perl-libnet.noarch 3.11-3.fc28
  perl-libs.x86_64 4:5.26.2-412.fc28
  perl-macros.x86_64 4:5.26.2-412.fc28
  perl-parent.noarch 1:0.236-395.fc28
  perl-podlators.noarch 4.11-1.fc28
  perl-threads.x86_64 1:2.21-2.fc28
  perl-threads-shared.x86_64 1.58-2.fc28
  pkgconf.x86_64 1.4.2-1.fc28
  pkgconf-m4.noarch 1.4.2-1.fc28
  pkgconf-pkg-config.x86_64 1.4.2-1.fc28

Upgraded:
  libgcc.x86_64 8.1.1-5.fc28

Complete!
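The transaction above is the toolchain bootstrap for the builder image: gcc, git and make plus their dependency closure, with the weak dependencies perl-IO-Socket-SSL and perl-Mozilla-CA pulled in by default. A minimal sketch of the equivalent non-interactive install, assuming stock dnf defaults (the exact invocation is not shown in this log):

    # Hypothetical reproduction of the install step above; -y keeps it non-interactive.
    dnf install -y gcc git make
    # Trimming the dnf caches afterwards is the usual image-size step and is the
    # likely source of the "18 files removed" line that follows.
    dnf clean all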
18 files removed
 ---> 6050b24a5d85
Removing intermediate container 02a8f5f5b75f
Step 5/9 : ENV GIMME_GO_VERSION 1.9.2
 ---> Running in e69b8fc868d2
 ---> 0447d2178073
Removing intermediate container e69b8fc868d2
Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh
 ---> Running in 02c12a468b9e
 ---> 291db82d955f
Removing intermediate container 02c12a468b9e
Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin"
 ---> Running in 9b4795f1f126
 ---> 793556477837
Removing intermediate container 9b4795f1f126
Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli
 ---> Running in 135fd92f2291
go version go1.9.2 linux/amd64
 ---> fd5c6e1f9461
Removing intermediate container 135fd92f2291
Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "winrmcli" ''
 ---> Running in 56e187d0944e
 ---> 91d1be1bcbe4
Removing intermediate container 56e187d0944e
Successfully built 91d1be1bcbe4
Sending build context to Docker daemon 36.77 MB
Step 1/5 : FROM fedora:27
Trying to pull repository docker.io/library/fedora ...
27: Pulling from docker.io/library/fedora
2176639d844b: Pulling fs layer
2176639d844b: Verifying Checksum
2176639d844b: Download complete
2176639d844b: Pull complete
Digest: sha256:ec588fc80b05e19d3006bf2e8aa325f0a2e2ff1f609b7afb39176ca8e3e13467
Status: Downloaded newer image for docker.io/fedora:27
 ---> 9110ae7f579f
Step 2/5 : MAINTAINER "The KubeVirt Project"
 ---> Running in 1543643a3342
 ---> 71a8c548e503
Removing intermediate container 1543643a3342
Step 3/5 : COPY example-hook-sidecar /example-hook-sidecar
 ---> 59c0ff4082fa
Removing intermediate container 191a82cabeb4
Step 4/5 : ENTRYPOINT /example-hook-sidecar
 ---> Running in de22e3397360
 ---> cc91e4c7352e
Removing intermediate container de22e3397360
Step 5/5 : LABEL "example-hook-sidecar" '' "kubevirt-functional-tests-openshift-3.10-release1" ''
 ---> Running in 6ace512c53b8
 ---> 865977f93056
Removing intermediate container 6ace512c53b8
Successfully built 865977f93056
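Steps 5/9 through 8/9 above show the pattern used to put a Go toolchain into the winrmcli helper image: gimme is fetched once, its environment exports are appended to /etc/profile.d/gimme.sh, and every later build step sources that file before calling go. The same sequence as a standalone sketch, assuming only bash and curl are available:

    # Install Go 1.9.2 via gimme, exactly as in Step 6/9 above.
    export GIMME_GO_VERSION=1.9.2
    mkdir -p /gimme
    curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme \
        | HOME=/gimme bash >> /etc/profile.d/gimme.sh
    # Later steps pick up GOROOT and PATH by sourcing the generated profile (Step 8/9).
    source /etc/profile.d/gimme.sh
    go version    # prints "go version go1.9.2 linux/amd64", as in the build output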
hack/build-docker.sh push
The push refers to a repository [localhost:32816/kubevirt/virt-controller]
24e31bc3c6f1: Preparing
b2f5abdac324: Preparing
891e1e4ef82a: Preparing
b2f5abdac324: Pushed
24e31bc3c6f1: Pushed
891e1e4ef82a: Pushed
devel: digest: sha256:c1124e63cef3099839465641a575314c3acb4f983bcd515f4b93b66bdfe219a2 size: 949
The push refers to a repository [localhost:32816/kubevirt/virt-launcher]
ecf458511e2b: Preparing
9b0ccdd79831: Preparing
1f66c09a7c1e: Preparing
33ce55367654: Preparing
768846e69f95: Preparing
0b99c4111657: Preparing
da38cf808aa5: Preparing
b83399358a92: Preparing
186d8b3e4fd8: Preparing
da38cf808aa5: Waiting
b83399358a92: Waiting
186d8b3e4fd8: Waiting
fa6154170bf5: Preparing
5eefb9960a36: Preparing
891e1e4ef82a: Preparing
fa6154170bf5: Waiting
5eefb9960a36: Waiting
891e1e4ef82a: Waiting
9b0ccdd79831: Pushed
33ce55367654: Pushed
ecf458511e2b: Pushed
da38cf808aa5: Pushed
b83399358a92: Pushed
186d8b3e4fd8: Pushed
fa6154170bf5: Pushed
891e1e4ef82a: Mounted from kubevirt/virt-controller
1f66c09a7c1e: Pushed
0b99c4111657: Pushed
768846e69f95: Pushed
5eefb9960a36: Pushed
devel: digest: sha256:493e6ad1c790dee2425410039f3c3b95836ef42472c211ae8a9e3a4cdd61b7c5 size: 2828
The push refers to a repository [localhost:32816/kubevirt/virt-handler]
42fc06c09518: Preparing
891e1e4ef82a: Preparing
891e1e4ef82a: Mounted from kubevirt/virt-launcher
42fc06c09518: Pushed
devel: digest: sha256:c3bd15445616c08b8b0b7b01da1c764f9269061af8a4c80e7fdcc82207b87b58 size: 741
The push refers to a repository [localhost:32816/kubevirt/virt-api]
566125a4e16a: Preparing
afd1d781e4d1: Preparing
891e1e4ef82a: Preparing
891e1e4ef82a: Mounted from kubevirt/virt-handler
afd1d781e4d1: Pushed
566125a4e16a: Pushed
devel: digest: sha256:505ca7f497a0c5bae30320686324a221f2ebdc831f978e2105b6ee9c307c17b4 size: 948
The push refers to a repository [localhost:32816/kubevirt/disks-images-provider]
dc0875c44573: Preparing
8fc77a44094f: Preparing
891e1e4ef82a: Preparing
891e1e4ef82a: Mounted from kubevirt/virt-api
dc0875c44573: Pushed
8fc77a44094f: Pushed
devel: digest: sha256:d23d8d42ec6e15ae7ed6e778918aafb30b1527dcab703a192077860ecf796c74 size: 948
The push refers to a repository [localhost:32816/kubevirt/vm-killer]
d1b69e768421: Preparing
891e1e4ef82a: Preparing
891e1e4ef82a: Mounted from kubevirt/disks-images-provider
d1b69e768421: Pushed
devel: digest: sha256:e18b0719b6c92415bd3a9d4e45278bb4a4f7bccefbd3fe8c958aad9b913bc32c size: 740
The push refers to a repository [localhost:32816/kubevirt/registry-disk-v1alpha]
2a15632f54d4: Preparing
91a924e03d7c: Preparing
25edbec0eaea: Preparing
2a15632f54d4: Pushed
91a924e03d7c: Pushed
25edbec0eaea: Pushed
devel: digest: sha256:93dbd4b6c598eae77e68f8119e129d092b75cfe0573a46c653a4578391b54edd size: 948
The push refers to a repository [localhost:32816/kubevirt/cirros-registry-disk-demo]
f287bddc58c9: Preparing
2a15632f54d4: Preparing
91a924e03d7c: Preparing
25edbec0eaea: Preparing
91a924e03d7c: Mounted from kubevirt/registry-disk-v1alpha
25edbec0eaea: Mounted from kubevirt/registry-disk-v1alpha
2a15632f54d4: Mounted from kubevirt/registry-disk-v1alpha
f287bddc58c9: Pushed
devel: digest: sha256:d84ec6e1c3b1e790318b351a867571430b0f77022b609bf72c7edc11774869a2 size: 1160
The push refers to a repository [localhost:32816/kubevirt/fedora-cloud-registry-disk-demo]
191bddb21627: Preparing
2a15632f54d4: Preparing
91a924e03d7c: Preparing
25edbec0eaea: Preparing
2a15632f54d4: Mounted from kubevirt/cirros-registry-disk-demo
91a924e03d7c: Mounted from kubevirt/cirros-registry-disk-demo
25edbec0eaea: Mounted from kubevirt/cirros-registry-disk-demo
191bddb21627: Pushed
devel: digest: sha256:721c5dc3b73e50b865b6d395e48884382c391509e18b4d77a3a27456a1eea65c size: 1161
The push refers to a repository [localhost:32816/kubevirt/alpine-registry-disk-demo]
8a362b640dc9: Preparing
2a15632f54d4: Preparing
91a924e03d7c: Preparing
25edbec0eaea: Preparing
91a924e03d7c: Mounted from kubevirt/fedora-cloud-registry-disk-demo
25edbec0eaea: Mounted from kubevirt/fedora-cloud-registry-disk-demo
2a15632f54d4: Mounted from kubevirt/fedora-cloud-registry-disk-demo
8a362b640dc9: Pushed
devel: digest: sha256:6c9639e0cb8ed67572ed78aad285cce752608f39802ce49856474162feae16f5 size: 1160
The push refers to a repository [localhost:32816/kubevirt/subresource-access-test]
90a6fa28ccf9: Preparing
4052ce9d0aff: Preparing
891e1e4ef82a: Preparing
891e1e4ef82a: Mounted from kubevirt/vm-killer
4052ce9d0aff: Pushed
90a6fa28ccf9: Pushed
devel: digest: sha256:0bfe30fd5e619cc16b5b68abcfb1f56214f3deecb887ac299dee28e78491349f size: 948
The push refers to a repository [localhost:32816/kubevirt/winrmcli]
64ccc7ac4271: Preparing
4242962b50c3: Preparing
0e374d8c733e: Preparing
891e1e4ef82a: Preparing
891e1e4ef82a: Mounted from kubevirt/subresource-access-test
64ccc7ac4271: Pushed
0e374d8c733e: Pushed
4242962b50c3: Pushed
devel: digest: sha256:7ba212e34e7bbac39ae9d54624462c338a98987d0eb9f59f8bb24b123847d8b4 size: 1165
The push refers to a repository [localhost:32816/kubevirt/example-hook-sidecar]
fd558746be3e: Preparing
39bae602f753: Preparing
fd558746be3e: Pushed
39bae602f753: Pushed
devel: digest: sha256:d2dc36d4155d9c8abb562277e17bf7366b3c0b488f02a738cf4525feb0114f74 size: 740
make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt'
Done
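All images above go to the ephemeral registry the provider publishes on localhost:32816; note how the shared base layer 891e1e4ef82a is "Mounted from" a sibling repository instead of being re-uploaded. hack/build-docker.sh drives this; a reduced sketch of the tag-and-push loop it performs (image list abbreviated, registry port taken from this run and different on every run):

    registry=localhost:32816
    for image in virt-controller virt-launcher virt-handler virt-api; do
        docker tag kubevirt/${image}:devel ${registry}/kubevirt/${image}:devel
        docker push ${registry}/kubevirt/${image}:devel
    done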
./cluster/clean.sh
+ source hack/common.sh
++++ dirname 'hack/common.sh[0]'
+++ cd hack/../
+++ pwd
++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt
++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out
++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor
++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd
++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests
++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs
++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests
++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests
++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python
++ KUBEVIRT_PROVIDER=os-3.10.0
++ KUBEVIRT_NUM_NODES=2
++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']'
++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1
++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1
+++ kubevirt_version
+++ '[' -n '' ']'
+++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']'
++++ git describe --always --tags
+++ echo v0.7.0-127-g1c7464a
++ KUBEVIRT_VERSION=v0.7.0-127-g1c7464a
+ source cluster/os-3.10.0/provider.sh
++ set -e
++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a
++ source cluster/ephemeral-provider-common.sh
+++ set -e
+++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a'
+ source hack/config.sh
++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace
++ KUBEVIRT_PROVIDER=os-3.10.0
++ KUBEVIRT_PROVIDER=os-3.10.0
++ source hack/config-default.sh source hack/config-os-3.10.0.sh
+++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar'
+++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar'
+++ docker_prefix=kubevirt
+++ docker_tag=latest
+++ master_ip=192.168.200.2
+++ network_provider=flannel
+++ namespace=kube-system
++ test -f hack/config-provider-os-3.10.0.sh
++ source hack/config-provider-os-3.10.0.sh
+++ master_ip=127.0.0.1
+++ docker_tag=devel
+++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig
+++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl
+++ docker_prefix=localhost:32816/kubevirt
+++ manifest_docker_prefix=registry:5000/kubevirt
++ test -f hack/config-local.sh
++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace
+ echo 'Cleaning up ...'
Cleaning up ...
+ cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers
+ read p
+ grep foregroundDeleteVirtualMachine
error: the server doesn't have a resource type "vmis"
+ _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0
No resources found
+ _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0
No resources found
+ _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0
No resources found
+ namespaces=(default ${namespace})
+ for i in '${namespaces[@]}'
+ _kubectl -n default delete apiservices -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io
No resources found
+ _kubectl -n default delete deployment -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete deployment -l kubevirt.io
No resources found
+ _kubectl -n default delete rs -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete rs -l kubevirt.io
No resources found
+ _kubectl -n default delete services -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete services -l kubevirt.io
No resources found
+ _kubectl -n default delete apiservices -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io
No resources found
+ _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io
No resources found
+ _kubectl -n default delete secrets -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete secrets -l kubevirt.io
No resources found
+ _kubectl -n default delete pv -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete pv -l kubevirt.io
No resources found
+ _kubectl -n default delete pvc -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete pvc -l kubevirt.io
No resources found
+ _kubectl -n default delete ds -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete ds -l kubevirt.io
No resources found
+ _kubectl -n default delete customresourcedefinitions -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l kubevirt.io
No resources found
+ _kubectl -n default delete pods -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete pods -l kubevirt.io
No resources found
+ _kubectl -n default delete clusterrolebinding -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l kubevirt.io
No resources found
+ _kubectl -n default delete rolebinding -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete rolebinding -l kubevirt.io
No resources found
+ _kubectl -n default delete roles -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete roles -l kubevirt.io
No resources found
+ _kubectl -n default delete clusterroles -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete clusterroles -l kubevirt.io
No resources found
+ _kubectl -n default delete serviceaccounts -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l kubevirt.io
No resources found
++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io
++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
++ wc -l
++ cluster/os-3.10.0/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found
+ '[' 0 -gt 0 ']'
+ for i in '${namespaces[@]}'
+ _kubectl -n kube-system delete apiservices -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete deployment -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete deployment -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete rs -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete rs -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete services -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete services -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete apiservices -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete secrets -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete secrets -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete pv -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete pv -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete pvc -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete pvc -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete ds -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete ds -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete pods -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete pods -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete rolebinding -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete rolebinding -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete roles -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete roles -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete clusterroles -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete clusterroles -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete serviceaccounts -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io
No resources found
++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io
++ wc -l
++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
++ cluster/os-3.10.0/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found
+ '[' 0 -gt 0 ']'
+ sleep 2
+ echo Done
Done
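clean.sh walks both namespaces and deletes every resource kind carrying a kubevirt.io label; the repeated export KUBECONFIG lines in the trace come from a small _kubectl wrapper around the provider's pinned kubectl binary. A condensed sketch of that pattern, inferred from the trace (resource list abbreviated):

    _kubectl() {
        export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
        cluster/os-3.10.0/.kubectl "$@"
    }

    for ns in default kube-system; do
        for kind in apiservices deployment rs services validatingwebhookconfiguration \
                    secrets pv pvc ds customresourcedefinitions pods \
                    clusterrolebinding rolebinding roles clusterroles serviceaccounts; do
            _kubectl -n "$ns" delete "$kind" -l kubevirt.io
        done
    done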
./cluster/deploy.sh
+ source hack/common.sh
++++ dirname 'hack/common.sh[0]'
+++ cd hack/../
+++ pwd
++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt
++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out
++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor
++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd
++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests
++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs
++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests
++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests
++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python
++ KUBEVIRT_PROVIDER=os-3.10.0
++ KUBEVIRT_NUM_NODES=2
++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']'
++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1
++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1
+++ kubevirt_version
+++ '[' -n '' ']'
+++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']'
++++ git describe --always --tags
+++ echo v0.7.0-127-g1c7464a
++ KUBEVIRT_VERSION=v0.7.0-127-g1c7464a
+ source cluster/os-3.10.0/provider.sh
++ set -e
++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a
++ source cluster/ephemeral-provider-common.sh
+++ set -e
+++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a'
+ source hack/config.sh
++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace
++ KUBEVIRT_PROVIDER=os-3.10.0
++ KUBEVIRT_PROVIDER=os-3.10.0
++ source hack/config-default.sh source hack/config-os-3.10.0.sh
+++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar'
+++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar'
+++ docker_prefix=kubevirt
+++ docker_tag=latest
+++ master_ip=192.168.200.2
+++ network_provider=flannel
+++ namespace=kube-system
++ test -f hack/config-provider-os-3.10.0.sh
++ source hack/config-provider-os-3.10.0.sh
+++ master_ip=127.0.0.1
+++ docker_tag=devel
+++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig
+++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl
+++ docker_prefix=localhost:32816/kubevirt
+++ manifest_docker_prefix=registry:5000/kubevirt
++ test -f hack/config-local.sh
++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace
+ echo 'Deploying ...'
Deploying ...
+ [[ -z openshift-3.10-release ]]
+ [[ openshift-3.10-release =~ .*-dev ]]
+ [[ openshift-3.10-release =~ .*-release ]]
+ for manifest in '${MANIFESTS_OUT_DIR}/release/*'
+ [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]]
+ continue
+ for manifest in '${MANIFESTS_OUT_DIR}/release/*'
+ [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]]
+ _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml
clusterrole.rbac.authorization.k8s.io "kubevirt.io:admin" created
clusterrole.rbac.authorization.k8s.io "kubevirt.io:edit" created
clusterrole.rbac.authorization.k8s.io "kubevirt.io:view" created
serviceaccount "kubevirt-apiserver" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver-auth-delegator" created
rolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created
role.rbac.authorization.k8s.io "kubevirt-apiserver" created
clusterrole.rbac.authorization.k8s.io "kubevirt-apiserver" created
clusterrole.rbac.authorization.k8s.io "kubevirt-controller" created
serviceaccount "kubevirt-controller" created
serviceaccount "kubevirt-privileged" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller-cluster-admin" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-privileged-cluster-admin" created
clusterrole.rbac.authorization.k8s.io "kubevirt.io:default" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt.io:default" created
service "virt-api" created
deployment.extensions "virt-api" created
deployment.extensions "virt-controller" created
daemonset.extensions "virt-handler" created
customresourcedefinition.apiextensions.k8s.io "virtualmachineinstances.kubevirt.io" created
customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancereplicasets.kubevirt.io" created
customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancepresets.kubevirt.io" created
customresourcedefinition.apiextensions.k8s.io "virtualmachines.kubevirt.io" created
+ _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R
persistentvolumeclaim "disk-alpine" created
persistentvolume "host-path-disk-alpine" created
persistentvolumeclaim "disk-custom" created
persistentvolume "host-path-disk-custom" created
daemonset.extensions "disks-images-provider" created
serviceaccount "kubevirt-testing" created
clusterrolebinding.rbac.authorization.k8s.io "kubevirt-testing-cluster-admin" created
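For a *-release target, deploy.sh applies everything under _out/manifests/release/ except demo payloads, then recursively applies the testing manifests. The loop is visible in the trace above; as a sketch:

    for manifest in ${MANIFESTS_OUT_DIR}/release/*; do
        [[ $manifest =~ .*demo.* ]] && continue   # skip demo-content.yaml and friends
        _kubectl create -f "$manifest"
    done
    _kubectl create -f "${MANIFESTS_OUT_DIR}/testing" -R   # -R recurses into subdirectories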
+ [[ os-3.10.0 =~ os-* ]]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"]
+ _kubectl adm policy add-scc-to-user privileged admin
+ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ KUBECONFIG=cluster/os-3.10.0/.kubeconfig
+ cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged admin
scc "privileged" added to: ["admin"]
+ echo Done
Done
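On OpenShift providers the KubeVirt service accounts also need the privileged SCC before the daemonsets can schedule; the five grants above all follow one pattern, condensed here:

    for sa in kubevirt-controller kubevirt-testing kubevirt-privileged kubevirt-apiserver; do
        _kubectl adm policy add-scc-to-user privileged -z "$sa" -n kube-system
    done
    _kubectl adm policy add-scc-to-user privileged admin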
+ namespaces=(kube-system default)
+ [[ kube-system != \k\u\b\e\-\s\y\s\t\e\m ]]
+ timeout=300
+ sample=30
+ for i in '${namespaces[@]}'
+ current_time=0
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
++ grep -v Running
+ '[' -n 'disks-images-provider-gm64z 0/1 Pending 0 0s
disks-images-provider-hvkkw 0/1 ContainerCreating 0 0s
virt-api-7d79764579-mtd7c 0/1 ContainerCreating 0 2s
virt-api-7d79764579-szt65 0/1 ContainerCreating 0 2s
virt-controller-7d57d96b65-8jbq9 0/1 ContainerCreating 0 2s
virt-controller-7d57d96b65-ll9d9 0/1 ContainerCreating 0 2s
virt-handler-cnhd8 0/1 ContainerCreating 0 2s
virt-handler-zwtd9 0/1 ContainerCreating 0 2s' ']'
+ echo 'Waiting for kubevirt pods to enter the Running state ...'
Waiting for kubevirt pods to enter the Running state ...
+ kubectl get pods -n kube-system --no-headers
+ cluster/kubectl.sh get pods -n kube-system --no-headers
+ grep -v Running
disks-images-provider-gm64z 0/1 ContainerCreating 0 1s
disks-images-provider-hvkkw 0/1 ContainerCreating 0 1s
virt-api-7d79764579-mtd7c 0/1 ContainerCreating 0 3s
virt-api-7d79764579-szt65 0/1 ContainerCreating 0 3s
virt-controller-7d57d96b65-8jbq9 0/1 ContainerCreating 0 3s
virt-controller-7d57d96b65-ll9d9 0/1 ContainerCreating 0 3s
virt-handler-cnhd8 0/1 ContainerCreating 0 3s
virt-handler-zwtd9 0/1 ContainerCreating 0 3s
+ sleep 30
+ current_time=30
+ '[' 30 -gt 300 ']'
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
++ grep -v Running
+ '[' -n '' ']'
+ current_time=0
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
+ '[' -n 'false
false' ']'
+ echo 'Waiting for KubeVirt containers to become ready ...'
Waiting for KubeVirt containers to become ready ...
+ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ grep false
+ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
false
false
+ sleep 30
+ current_time=30
+ '[' 30 -gt 300 ']'
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
+ '[' -n '' ']'
+ kubectl get pods -n kube-system
+ cluster/kubectl.sh get pods -n kube-system
NAME                               READY  STATUS   RESTARTS  AGE
disks-images-provider-gm64z        1/1    Running  0         1m
disks-images-provider-hvkkw        1/1    Running  0         1m
master-api-node01                  1/1    Running  1         18d
master-controllers-node01          1/1    Running  1         18d
master-etcd-node01                 1/1    Running  1         18d
virt-api-7d79764579-mtd7c          1/1    Running  0         1m
virt-api-7d79764579-szt65          1/1    Running  1         1m
virt-controller-7d57d96b65-8jbq9   1/1    Running  0         1m
virt-controller-7d57d96b65-ll9d9   1/1    Running  0         1m
virt-handler-cnhd8                 1/1    Running  0         1m
virt-handler-zwtd9                 1/1    Running  0         1m
+ for i in '${namespaces[@]}'
+ current_time=0
++ kubectl get pods -n default --no-headers
++ grep -v Running
++ cluster/kubectl.sh get pods -n default --no-headers
+ '[' -n '' ']'
+ current_time=0
++ kubectl get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ cluster/kubectl.sh get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers
++ grep false
+ '[' -n '' ']'
+ kubectl get pods -n default
+ cluster/kubectl.sh get pods -n default
NAME                       READY  STATUS   RESTARTS  AGE
docker-registry-1-rl562    1/1    Running  2         18d
registry-console-1-rw9zf   1/1    Running  1         18d
router-1-6cch9             1/1    Running  1         18d
+ kubectl version
+ cluster/kubectl.sh version
oc v3.10.0-rc.0+c20e215
kubernetes v1.10.0+b81c8f8
features: Basic-Auth GSSAPI Kerberos SPNEGO

Server https://127.0.0.1:32813
openshift v3.10.0-rc.0+c20e215
kubernetes v1.10.0+b81c8f8
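The readiness gate above polls every 30 seconds against a 300-second budget, first until no pod reports a status other than Running, then until no container reports ready=false. Condensed from the trace (error handling omitted):

    timeout=300; sample=30
    for ns in kube-system default; do
        current_time=0
        # Phase 1: no pod may be outside the Running state.
        while [ -n "$(kubectl get pods -n "$ns" --no-headers | grep -v Running)" ]; do
            sleep "$sample"; current_time=$((current_time + sample))
            [ "$current_time" -gt "$timeout" ] && exit 1
        done
        current_time=0
        # Phase 2: every container must report ready=true.
        while [ -n "$(kubectl get pods -n "$ns" \
                -ocustom-columns=status:status.containerStatuses[*].ready --no-headers \
                | grep false)" ]; do
            sleep "$sample"; current_time=$((current_time + sample))
            [ "$current_time" -gt "$timeout" ] && exit 1
        done
    done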
+ ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml'
+ [[ openshift-3.10-release =~ windows.* ]]
+ FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml'
+ make functest
hack/dockerized "hack/build-func-tests.sh"
sha256:ceba12cbc33e4e37a707840478a630db561e2427b78c8c9f9cd6d0b73276ab32
go version go1.10 linux/amd64
Waiting for rsyncd to be ready.
go version go1.10 linux/amd64
Compiling tests...
compiled tests.test
hack/functests.sh
Running Suite: Tests Suite
==========================
Random Seed: 1532590745
Will run 148 of 148 specs

• [SLOW TEST:9.308 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should scale
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    to three, to two and then to zero replicas
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:8.272 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should scale
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    to five, to six and then to zero replicas
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
••
------------------------------
• [SLOW TEST:20.701 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should update readyReplicas once VMIs are up
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:157
------------------------------
••
------------------------------
• [SLOW TEST:5.480 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should not scale when paused and scale when resume
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:223
------------------------------
• [SLOW TEST:13.567 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should remove the finished VM
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:279
------------------------------
•••••••••••
------------------------------
• [SLOW TEST:17.715 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
  VMI definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
    with SM BIOS hook sidecar
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
      should successfully start with hook sidecar annotation
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:60
------------------------------
• [SLOW TEST:18.095 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
  VMI definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
    with SM BIOS hook sidecar
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
      should call Collect and OnDefineDomain on the hook sidecar
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:67
------------------------------
• [SLOW TEST:20.429 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
  VMI definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
    with SM BIOS hook sidecar
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
      should update domain XML with SM BIOS properties
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:83
------------------------------
••••
------------------------------
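For reference, the suite running here was launched with the arguments assembled at the end of the deploy phase; re-running it locally would look roughly like this (workspace path taken from this job and specific to it):

    FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' \
        make functest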
• [SLOW TEST:51.369 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      with a cirros image
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:67
        should return that we are running cirros
        /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:68
------------------------------
• [SLOW TEST:63.066 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      with a fedora image
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:77
        should return that we are running fedora
        /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:78
------------------------------
• [SLOW TEST:52.715 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      should be able to reconnect to console multiple times
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:87
------------------------------
• [SLOW TEST:16.283 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      should wait until the virtual machine is in running state and return a stream interface
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:103
------------------------------
• [SLOW TEST:30.224 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      should fail waiting for the virtual machine instance to be running
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:111
------------------------------
• [SLOW TEST:30.221 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:37
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
      should fail waiting for the expecter
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:134
------------------------------
Service cluster-ip-vmi successfully exposed for virtualmachineinstance testvmiszw2z
• [SLOW TEST:56.691 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on a VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61
    Expose ClusterIP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:68
      Should expose a Cluster IP service on a VMI and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:71
------------------------------
Service cluster-ip-target-vmi successfully exposed for virtualmachineinstance testvmiszw2z
•Service node-port-vmi successfully exposed for virtualmachineinstance testvmiszw2z
------------------------------
• [SLOW TEST:10.340 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on a VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61
    Expose NodePort service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:124
      Should expose a NodePort service on a VMI and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:129
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:129 ------------------------------ Service cluster-ip-udp-vmi successfully exposed for virtualmachineinstance testvmirblkv • [SLOW TEST:57.574 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose UDP service on a VMI /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:166 Expose ClusterIP UDP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:173 Should expose a ClusterIP service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:177 ------------------------------ Service node-port-udp-vmi successfully exposed for virtualmachineinstance testvmirblkv • [SLOW TEST:8.223 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose UDP service on a VMI /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:166 Expose NodePort UDP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:205 Should expose a NodePort service on a VMI and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:210 ------------------------------ Service cluster-ip-vmirs successfully exposed for vmirs replicasetkgftg • [SLOW TEST:81.312 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on a VMI replica set /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:253 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:286 Should create a ClusterIP service on VMRS and connect to it /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:290 ------------------------------ Service cluster-ip-vm successfully exposed for virtualmachine testvmim66f5 VM testvmim66f5 was scheduled to start • [SLOW TEST:60.918 seconds] Expose /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53 Expose service on an VM /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:318 Expose ClusterIP service /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:362 Connect to ClusterIP services that was set when VM was offline /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:363 ------------------------------ • ------------------------------ • [SLOW TEST:53.490 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:49.817 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:142.177 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times 
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with Disk PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:145.038 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 with Alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 with CDRom PVC /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:51.684 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With an emptyDisk defined /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:113 should create a writeable emptyDisk with the right capacity /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:115 ------------------------------ • [SLOW TEST:53.510 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With an emptyDisk defined and a specified serial number /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:163 should create a writeable emptyDisk with the specified serial number /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:165 ------------------------------ • [SLOW TEST:48.653 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should be successfully started /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:207 ------------------------------ • [SLOW TEST:105.027 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With ephemeral alpine PVC /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205 should not persist data /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:218 ------------------------------ • [SLOW TEST:133.248 seconds] Storage /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70 With VirtualMachineInstance with two PVCs /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:266 should start vmi multiple times /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:278 ------------------------------ •• ------------------------------ • [SLOW TEST:16.161 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should update VirtualMachine once VMIs are up /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:195 ------------------------------ • [SLOW TEST:12.321 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should remove VirtualMachineInstance once the VMI is marked for deletion /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:204 
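
The "table.go:92" and "table_entry.go:46" vendor frames that recur throughout the spec listing above come from Ginkgo's table extension: one test body is registered once and stamped out per entry ("with Disk PVC", "with CDRom PVC", ...), so each entry is reported as its own spec. A minimal sketch of that shape, with illustrative names rather than the repository's actual test code:

    package tests_test

    import (
        . "github.com/onsi/ginkgo"
        "github.com/onsi/ginkgo/extensions/table"
    )

    var _ = Describe("Storage", func() {
        Context("Starting a VirtualMachineInstance", func() {
            Context("with Alpine PVC", func() {
                // DescribeTable registers the body once; each Entry becomes
                // its own spec, which is why the output prints the shared
                // "should be successfully started" line followed by the
                // entry name ("with Disk PVC" / "with CDRom PVC").
                table.DescribeTable("should be successfully started",
                    func(diskType string) {
                        // create a VMI attaching the Alpine PVC as diskType
                        // and wait for the Running phase (omitted in sketch)
                    },
                    table.Entry("with Disk PVC", "disk"),
                    table.Entry("with CDRom PVC", "cdrom"),
                )
            })
        })
    })
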
------------------------------ • ------------------------------ • [SLOW TEST:27.564 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if it gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:245 ------------------------------ • [SLOW TEST:46.543 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:265 ------------------------------ • [SLOW TEST:33.399 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should stop VirtualMachineInstance if running set to false /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:325 ------------------------------ • [SLOW TEST:188.449 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should start and stop VirtualMachineInstance multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:333 ------------------------------ • [SLOW TEST:47.422 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should not update the VirtualMachineInstance spec if Running /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:346 ------------------------------ • [SLOW TEST:204.661 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should survive guest shutdown, multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:387 ------------------------------ VM testvmiznmrx was scheduled to start • [SLOW TEST:16.306 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should start a VirtualMachineInstance once /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:436 ------------------------------ VM testvmic8qbf was scheduled to stop • [SLOW TEST:33.390 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should stop a VirtualMachineInstance once /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:467 ------------------------------ • [SLOW TEST:80.005 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting and stopping the same VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:90 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:91 should success multiple times /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:92 ------------------------------ • [SLOW TEST:15.546 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:111 
with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:112 should not modify the spec on status update /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:113 ------------------------------ • [SLOW TEST:35.427 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting multiple VMIs /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:129 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:130 should success /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:131 ------------------------------ • [SLOW TEST:123.028 seconds] Slirp /root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:39 should be able to /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 VirtualMachineInstance with slirp interface /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • ------------------------------ • [SLOW TEST:10.302 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:7.998 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given an vm /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:7.749 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi preset /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:7.844 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi replica set /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • ------------------------------ • [SLOW TEST:50.591 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userDataBase64 source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81 should have cloud-init data /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:82 ------------------------------ • [SLOW TEST:164.111 
seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userDataBase64 source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81 with injected ssh-key /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:92 should have ssh-key under authorized keys /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:93 ------------------------------ • [SLOW TEST:59.821 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userData source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:118 should process provided cloud-init data /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:119 ------------------------------ • [SLOW TEST:52.189 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 should take user-data from k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:162 ------------------------------ • ------------------------------ • [SLOW TEST:16.056 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 should start it /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:76 ------------------------------ • [SLOW TEST:18.462 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 should attach virt-launcher to it /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:82 ------------------------------ •••• ------------------------------ • [SLOW TEST:73.373 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with boot order /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170 should be able to boot from selected disk /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 Alpine as first boot /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:26.290 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with boot order /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170 should be able to boot from selected disk /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 Cirros as first boot /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:15.405 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with user-data /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201 without k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202 should retry 
starting the VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:203 ------------------------------ • [SLOW TEST:16.029 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 with user-data /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201 without k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202 should log warning and proceed once the secret is there /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:233 ------------------------------ • [SLOW TEST:38.626 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 when virt-launcher crashes /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:281 should be stopped and have Failed phase /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:282 ------------------------------ • [SLOW TEST:31.113 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 when virt-handler crashes /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:304 should recover and continue management /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:305 ------------------------------ • [SLOW TEST:68.556 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 when virt-handler is responsive /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:335 should indicate that a node is ready for vmis /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:336 ------------------------------ Pod name: disks-images-provider-gm64z Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-hvkkw Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-mtd7c Pod phase: Running level=info timestamp=2018-07-26T08:28:41.585108Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:28:43.282149Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:28:50.086311Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:28:50.978666Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 08:28:51 http: TLS handshake error from 10.129.0.1:34780: EOF level=info timestamp=2018-07-26T08:28:56.301301Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 level=info timestamp=2018-07-26T08:29:00.310029Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET 
url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:29:01.168136Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 08:29:01 http: TLS handshake error from 10.129.0.1:34790: EOF level=info timestamp=2018-07-26T08:29:10.375390Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:29:10.615209Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-26T08:29:10.625653Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json proto=HTTP/2.0 statusCode=404 contentLength=19 2018/07/26 08:29:11 http: TLS handshake error from 10.129.0.1:34800: EOF level=info timestamp=2018-07-26T08:29:11.659317Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:29:13.339310Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-szt65 Pod phase: Running 2018/07/26 08:27:07 http: TLS handshake error from 10.128.0.1:46940: EOF 2018/07/26 08:27:17 http: TLS handshake error from 10.128.0.1:46986: EOF 2018/07/26 08:27:27 http: TLS handshake error from 10.128.0.1:47036: EOF 2018/07/26 08:27:37 http: TLS handshake error from 10.128.0.1:47082: EOF 2018/07/26 08:27:47 http: TLS handshake error from 10.128.0.1:47128: EOF 2018/07/26 08:27:57 http: TLS handshake error from 10.128.0.1:47276: EOF 2018/07/26 08:28:07 http: TLS handshake error from 10.128.0.1:47322: EOF 2018/07/26 08:28:17 http: TLS handshake error from 10.128.0.1:47368: EOF 2018/07/26 08:28:27 http: TLS handshake error from 10.128.0.1:47418: EOF 2018/07/26 08:28:37 http: TLS handshake error from 10.128.0.1:47464: EOF 2018/07/26 08:28:47 http: TLS handshake error from 10.128.0.1:47510: EOF level=info timestamp=2018-07-26T08:28:51.453285Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 08:28:57 http: TLS handshake error from 10.128.0.1:47568: EOF 2018/07/26 08:29:07 http: TLS handshake error from 10.128.0.1:47614: EOF 2018/07/26 08:29:17 http: TLS handshake error from 10.128.0.1:47660: EOF Pod name: virt-controller-7d57d96b65-8jbq9 Pod phase: Running level=info timestamp=2018-07-26T08:25:48.061191Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmilsxbr kind= uid=7feff4f9-90ad-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:25:48.061324Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmilsxbr kind= uid=7feff4f9-90ad-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:26:14.353598Z pos=preset.go:139 component=virt-controller service=http 
namespace=kubevirt-test-default name=testvminzcgd kind= uid=8f9c155b-90ad-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:26:14.353752Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvminzcgd kind= uid=8f9c155b-90ad-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:26:29.757320Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmid4vbr kind= uid=98ca7a3e-90ad-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:26:29.757535Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmid4vbr kind= uid=98ca7a3e-90ad-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:26:45.785355Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmicpmfb kind= uid=a2584d0e-90ad-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:26:45.785515Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmicpmfb kind= uid=a2584d0e-90ad-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:26:45.867486Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmicpmfb\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmicpmfb" level=info timestamp=2018-07-26T08:27:24.410671Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizd6f5 kind= uid=b95df4a8-90ad-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:27:24.410788Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmizd6f5 kind= uid=b95df4a8-90ad-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:27:24.501028Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmizd6f5\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmizd6f5" level=info timestamp=2018-07-26T08:29:03.935674Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmikxjdj kind= uid=f4afe54a-90ad-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:29:03.935814Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmikxjdj kind= uid=f4afe54a-90ad-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:29:04.047401Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmikxjdj\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmikxjdj" Pod name: virt-controller-7d57d96b65-ll9d9 Pod phase: 
Running level=info timestamp=2018-07-26T07:37:25.261416Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-cnhd8 Pod phase: Running level=info timestamp=2018-07-26T08:27:46.025335Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind=VirtualMachineInstance uid=b95df4a8-90ad-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:27:48.205991Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-26T08:27:48.207252Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-26T08:27:48.207448Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind=VirtualMachineInstance uid=b95df4a8-90ad-11e8-90e0-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T08:27:48.222794Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind=VirtualMachineInstance uid=b95df4a8-90ad-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:27:48.222905Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind= uid=b95df4a8-90ad-11e8-90e0-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T08:27:48.222950Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind= uid=b95df4a8-90ad-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:27:55.324647Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind= uid=b95df4a8-90ad-11e8-90e0-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-26T08:27:55.324938Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind= uid=b95df4a8-90ad-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:27:55.404183Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-26T08:27:55.404352Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmizd6f5 kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:29:18.583389Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmikxjdj kind= uid=f4afe54a-90ad-11e8-90e0-525500d15501 msg="Processing vmi update" level=error timestamp=2018-07-26T08:29:18.603982Z pos=vm.go:397 component=virt-handler namespace=kubevirt-test-default name=testvmikxjdj kind= uid=f4afe54a-90ad-11e8-90e0-525500d15501 reason="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="Synchronizing the VirtualMachineInstance failed." level=info timestamp=2018-07-26T08:29:18.612616Z pos=vm.go:251 component=virt-handler reason="server error. 
command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmikxjdj" level=info timestamp=2018-07-26T08:29:18.612726Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmikxjdj kind= uid=f4afe54a-90ad-11e8-90e0-525500d15501 msg="Processing vmi update" Pod name: virt-handler-zwtd9 Pod phase: Running level=info timestamp=2018-07-26T08:24:53.761198Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind= uid=54348f6b-90ad-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:25:48.308062Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind= uid=54348f6b-90ad-11e8-90e0-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-26T08:25:48.310161Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind= uid=54348f6b-90ad-11e8-90e0-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-26T08:25:48.315006Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind= uid=54348f6b-90ad-11e8-90e0-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmijtdjx" level=info timestamp=2018-07-26T08:25:48.532693Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind= uid=54348f6b-90ad-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:25:48.533382Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-26T08:25:48.533903Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-26T08:25:48.538866Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:25:48.541005Z pos=vm.go:330 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind=VirtualMachineInstance uid= msg="Shutting down domain for deleted VirtualMachineInstance object." level=info timestamp=2018-07-26T08:25:48.541096Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind=VirtualMachineInstance uid= msg="Processing shutdown." level=info timestamp=2018-07-26T08:25:48.541315Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:25:48.533796Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-26T08:25:48.541619Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-26T08:25:48.542394Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
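
The "Operation cannot be fulfilled ... the object has been modified" entries in the virt-controller log above are ordinary Kubernetes optimistic-concurrency conflicts: another writer bumped the object's resourceVersion between the controller's read and its update, so the API server returns 409 Conflict and the controller re-enqueues the key. client-go ships a helper for the same read-modify-retry loop; a sketch with hypothetical names (KubeVirt's own objects go through its generated client, a core-API Pod is used here purely for illustration):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/util/retry"
    )

    // markInitialized performs a read-modify-write that survives 409 Conflict
    // responses: RetryOnConflict re-runs the closure, which re-reads the
    // latest resourceVersion on each attempt (hypothetical helper).
    func markInitialized(client kubernetes.Interface, namespace, name string) error {
        return retry.RetryOnConflict(retry.DefaultRetry, func() error {
            pod, err := client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
            if err != nil {
                return err
            }
            if pod.Annotations == nil {
                pod.Annotations = map[string]string{}
            }
            pod.Annotations["example.io/initialized"] = "true"
            _, err = client.CoreV1().Pods(namespace).Update(pod)
            return err // a Conflict here makes RetryOnConflict try again
        })
    }
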
level=info timestamp=2018-07-26T08:25:48.542592Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijtdjx kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
Pod name: virt-launcher-testvmikxjdj-56v5z
Pod phase: Running
level=info timestamp=2018-07-26T08:29:07.295264Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets"
level=info timestamp=2018-07-26T08:29:07.295565Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]"
level=info timestamp=2018-07-26T08:29:07.298168Z pos=libvirt.go:256 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system"
level=info timestamp=2018-07-26T08:29:17.305932Z pos=libvirt.go:271 component=virt-launcher msg="Connected to libvirt daemon"
level=info timestamp=2018-07-26T08:29:17.329155Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmikxjdj"
level=info timestamp=2018-07-26T08:29:17.330533Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback"
level=info timestamp=2018-07-26T08:29:17.330714Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready"
level=error timestamp=2018-07-26T08:29:18.597811Z pos=manager.go:159 component=virt-launcher namespace=kubevirt-test-default name=testvmikxjdj kind= uid=f4afe54a-90ad-11e8-90e0-525500d15501 reason="virError(Code=0, Domain=0, Message='Missing error')" msg="Getting the domain failed."
level=error timestamp=2018-07-26T08:29:18.598072Z pos=server.go:68 component=virt-launcher namespace=kubevirt-test-default name=testvmikxjdj kind= uid=f4afe54a-90ad-11e8-90e0-525500d15501 reason="virError(Code=0, Domain=0, Message='Missing error')" msg="Failed to sync vmi"
level=info timestamp=2018-07-26T08:29:18.641682Z pos=cloud-init.go:254 component=virt-launcher msg="generated nocloud iso file /var/run/libvirt/kubevirt-ephemeral-disk/cloud-init-data/kubevirt-test-default/testvmikxjdj/noCloud.iso"
level=error timestamp=2018-07-26T08:29:18.659851Z pos=common.go:126 component=virt-launcher msg="updated MAC for interface: eth0 - 0a:58:0a:dd:48:21"
level=info timestamp=2018-07-26T08:29:18.661677Z pos=converter.go:751 component=virt-launcher msg="Found nameservers in /etc/resolv.conf: \ufffd\ufffdBf"
level=info timestamp=2018-07-26T08:29:18.661746Z pos=converter.go:752 component=virt-launcher msg="Found search domains in /etc/resolv.conf: kubevirt-test-default.svc.cluster.local svc.cluster.local cluster.local"
level=info timestamp=2018-07-26T08:29:18.662117Z pos=dhcp.go:62 component=virt-launcher msg="Starting SingleClientDHCPServer"
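
The garbled "Found nameservers in /etc/resolv.conf: \ufffd\ufffdBf" line above is the program's own output, not log corruption: the nameserver was logged as raw net.IP bytes. 192 and 168 are invalid UTF-8 lead bytes (hence two replacement characters), while 66 and 102 are ASCII 'B' and 'f', so the address is almost certainly 192.168.66.102, the cluster DNS server. A self-contained sketch of the mistake and the fix (illustrative, not the converter.go code):

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // The parsed nameserver, kept as raw 4-byte net.IP data.
        ip := net.IP{192, 168, 66, 102}

        // Bug shape: a raw byte-to-string conversion logs the bytes verbatim;
        // a UTF-8 terminal renders them as "\ufffd\ufffdBf", as in the log above.
        fmt.Println("Found nameservers in /etc/resolv.conf: " + string(ip))

        // Fix: format the address instead of dumping its bytes.
        fmt.Println("Found nameservers in /etc/resolv.conf: " + ip.String())
    }
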
• Failure in Spec Setup (BeforeEach) [112.272 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    when virt-handler is not responsive
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:366
      the node controller should react [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:405

      Unexpected Warning event received.
      Expected
          <string>: Warning
      not to equal
          <string>: Warning

      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:245
------------------------------
level=info timestamp=2018-07-26T08:29:04.582969Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmikxjdj-56v5z"
level=info timestamp=2018-07-26T08:29:19.035789Z pos=utils.go:243 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmikxjdj-56v5z"
level=error timestamp=2018-07-26T08:29:19.069127Z pos=utils.go:241 component=tests reason="unexpected warning event received" msg="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')"
• [SLOW TEST:19.569 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    with node tainted
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:458
      the vmi with tolerations should be scheduled
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:480
------------------------------
•
------------------------------
S [SKIPPING] [0.252 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    with non default namespace
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:530
      should log libvirt start and stop lifecycle events of the domain
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        kubevirt-test-default [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

        Skip log query tests for JENKINS ci test environment
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:535
------------------------------
S [SKIPPING] [0.084 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    with non default namespace
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:530
      should log libvirt start and stop lifecycle events of the domain
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        kubevirt-test-alternative [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

        Skip log query tests for JENKINS ci test environment
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:535
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.083 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:591
      should enable emulation in virt-launcher [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:603

      Software emulation is not enabled on this cluster
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:599
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.118 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:591
      should be
reflected in domain XML [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:640 Software emulation is not enabled on this cluster /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:599 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.078 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70 VirtualMachineInstance Emulation Mode /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:591 should request a TUN device but not KVM [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:684 Software emulation is not enabled on this cluster /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:599 ------------------------------ •••• ------------------------------ • [SLOW TEST:16.722 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Delete a VirtualMachineInstance's Pod /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:836 should result in the VirtualMachineInstance moving to a finalized state /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:837 ------------------------------ • [SLOW TEST:35.244 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Delete a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:868 with an active pod. /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:869 should result in pod being terminated /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:870 ------------------------------ • [SLOW TEST:24.730 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Delete a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:868 with grace period greater than 0 /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:893 should run graceful shutdown /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:894 ------------------------------ • [SLOW TEST:29.184 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Killed VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:945 should be in Failed phase /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:946 ------------------------------ • [SLOW TEST:23.883 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48 Killed VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:945 should be left alone by virt-handler /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:973 ------------------------------ Pod name: disks-images-provider-gm64z Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-hvkkw Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-mtd7c Pod phase: Running level=info timestamp=2018-07-26T08:33:02.036321Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 08:33:11 http: TLS handshake error from 10.129.0.1:35044: EOF level=info timestamp=2018-07-26T08:33:12.082457Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 
statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:33:12.254407Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:33:14.221380Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 08:33:21 http: TLS handshake error from 10.129.0.1:35054: EOF level=info timestamp=2018-07-26T08:33:22.134201Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:33:26.318368Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 08:33:31 http: TLS handshake error from 10.129.0.1:35064: EOF level=info timestamp=2018-07-26T08:33:31.984274Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:33:32.174299Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/26 08:33:41 http: TLS handshake error from 10.129.0.1:35074: EOF level=info timestamp=2018-07-26T08:33:42.230825Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:33:42.316520Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-26T08:33:44.345605Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-api-7d79764579-szt65 Pod phase: Running 2018/07/26 08:31:27 http: TLS handshake error from 10.128.0.1:48290: EOF 2018/07/26 08:31:37 http: TLS handshake error from 10.128.0.1:48336: EOF 2018/07/26 08:31:47 http: TLS handshake error from 10.128.0.1:48382: EOF 2018/07/26 08:31:57 http: TLS handshake error from 10.128.0.1:48432: EOF 2018/07/26 08:32:07 http: TLS handshake error from 10.128.0.1:48478: EOF 2018/07/26 08:32:17 http: TLS handshake error from 10.128.0.1:48524: EOF 2018/07/26 08:32:27 http: TLS handshake error from 10.128.0.1:48574: EOF 2018/07/26 08:32:37 http: TLS handshake error from 10.128.0.1:48620: EOF 2018/07/26 08:32:47 http: TLS handshake error from 10.128.0.1:48666: EOF level=info timestamp=2018-07-26T08:32:56.746753Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/26 08:32:57 http: TLS handshake error from 10.128.0.1:48716: EOF 2018/07/26 08:33:07 http: TLS handshake error from 10.128.0.1:48762: EOF 2018/07/26 08:33:17 http: TLS handshake error from 10.128.0.1:48808: EOF 2018/07/26 08:33:27 http: TLS handshake error from 10.128.0.1:48858: EOF 2018/07/26 08:33:37 
http: TLS handshake error from 10.128.0.1:48904: EOF Pod name: virt-controller-7d57d96b65-8jbq9 Pod phase: Running level=info timestamp=2018-07-26T08:31:35.673634Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5vxx\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5vxx" level=info timestamp=2018-07-26T08:31:35.688393Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmil5vxx\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmil5vxx" level=info timestamp=2018-07-26T08:32:10.883721Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:32:10.883943Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:32:35.546615Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirvvgx kind= uid=72d0e899-90ae-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:32:35.546740Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmirvvgx kind= uid=72d0e899-90ae-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:32:35.723034Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirvvgx\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirvvgx" level=info timestamp=2018-07-26T08:32:35.760481Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmirvvgx\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmirvvgx" level=info timestamp=2018-07-26T08:33:04.720547Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi69rrn kind= uid=8435461b-90ae-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:33:04.720680Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi69rrn kind= uid=8435461b-90ae-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:33:04.811478Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi69rrn\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi69rrn" level=info timestamp=2018-07-26T08:33:28.617784Z pos=preset.go:139 component=virt-controller 
service=http namespace=kubevirt-test-default name=testvmiqxpmw kind= uid=9273b16d-90ae-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-26T08:33:28.623975Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqxpmw kind= uid=9273b16d-90ae-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-26T08:33:28.790592Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqxpmw\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqxpmw" level=info timestamp=2018-07-26T08:33:28.816651Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiqxpmw\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiqxpmw" Pod name: virt-controller-7d57d96b65-ll9d9 Pod phase: Running level=info timestamp=2018-07-26T07:37:25.261416Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-9tv26 Pod phase: Running level=info timestamp=2018-07-26T08:33:19.714501Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind= uid=8435461b-90ae-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:33:23.339706Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-26T08:33:23.340101Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-26T08:33:23.340224Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind= uid=8435461b-90ae-11e8-90e0-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T08:33:23.349731Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind= uid=8435461b-90ae-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:33:23.357451Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind= uid=8435461b-90ae-11e8-90e0-525500d15501 msg="No update processing required" level=info timestamp=2018-07-26T08:33:23.357494Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind= uid=8435461b-90ae-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:33:28.512035Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind= uid=8435461b-90ae-11e8-90e0-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." level=info timestamp=2018-07-26T08:33:28.512261Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind= uid=8435461b-90ae-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:33:28.573476Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain." 
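
The recurring "http: TLS handshake error from 10.128.0.1:NNNNN: EOF" lines in both virt-api logs above arrive at a steady ten-second cadence from ascending source ports, which is consistent with a plain-TCP health probe: the prober connects to the TLS port and closes without ever sending a ClientHello, so the server's handshake read ends in EOF. A small sketch of both sides (an assumed scenario, not virt-api code):

    package example

    import (
        "crypto/tls"
        "net"
    )

    // probeOnce imitates a TCP-level liveness check: open the connection,
    // then close it without ever starting a TLS handshake.
    func probeOnce(addr string) error {
        conn, err := net.Dial("tcp", addr)
        if err != nil {
            return err
        }
        return conn.Close()
    }

    // handshake shows the server side: against a peer like probeOnce the
    // handshake read returns io.EOF, which net/http's serve loop reports as
    // "http: TLS handshake error from <addr>: EOF" - the line seen above.
    func handshake(conn net.Conn, cfg *tls.Config) error {
        return tls.Server(conn, cfg).Handshake()
    }
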
level=info timestamp=2018-07-26T08:33:28.573610Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi69rrn kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:33:44.292780Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmiqxpmw kind= uid=9273b16d-90ae-11e8-90e0-525500d15501 msg="Processing vmi update" level=error timestamp=2018-07-26T08:33:44.315140Z pos=vm.go:397 component=virt-handler namespace=kubevirt-test-default name=testvmiqxpmw kind= uid=9273b16d-90ae-11e8-90e0-525500d15501 reason="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="Synchronizing the VirtualMachineInstance failed." level=info timestamp=2018-07-26T08:33:44.341473Z pos=vm.go:251 component=virt-handler reason="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmiqxpmw" level=info timestamp=2018-07-26T08:33:44.341627Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmiqxpmw kind= uid=9273b16d-90ae-11e8-90e0-525500d15501 msg="Processing vmi update" Pod name: virt-handler-qc5ds Pod phase: Running level=info timestamp=2018-07-26T08:32:35.413025Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-26T08:32:35.413744Z pos=vm.go:540 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="grace period expired, killing deleted VirtualMachineInstance testvmirgm7f" level=info timestamp=2018-07-26T08:32:35.627456Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-26T08:32:35.627677Z pos=vm.go:678 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind=Domain uid= msg="Domain deleted" level=info timestamp=2018-07-26T08:32:35.632309Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type DELETED" level=info timestamp=2018-07-26T08:32:35.635296Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:32:35.635423Z pos=vm.go:342 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Shutting down domain for VirtualMachineInstance with deletion timestamp." level=info timestamp=2018-07-26T08:32:35.635452Z pos=vm.go:383 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Processing shutdown." level=info timestamp=2018-07-26T08:32:35.658549Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-26T08:32:35.659136Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain." 
level=info timestamp=2018-07-26T08:32:35.659293Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-26T08:32:35.809413Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-26T08:32:35.809743Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind= uid=641ba966-90ae-11e8-90e0-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-26T08:32:35.823480Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-26T08:32:35.823605Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmirgm7f kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
Pod name: virt-launcher-testvmiqxpmw-z27dm
Pod phase: Running
level=info timestamp=2018-07-26T08:33:32.488197Z pos=manager.go:69 component=virt-launcher msg="Collected all requested hook sidecar sockets"
level=info timestamp=2018-07-26T08:33:32.488494Z pos=manager.go:72 component=virt-launcher msg="Sorted all collected sidecar sockets per hook point based on their priority and name: map[]"
level=info timestamp=2018-07-26T08:33:32.489946Z pos=libvirt.go:256 component=virt-launcher msg="Connecting to libvirt daemon: qemu:///system"
level=info timestamp=2018-07-26T08:33:42.499418Z pos=libvirt.go:271 component=virt-launcher msg="Connected to libvirt daemon"
level=info timestamp=2018-07-26T08:33:42.534729Z pos=virt-launcher.go:143 component=virt-launcher msg="Watchdog file created at /var/run/kubevirt/watchdog-files/kubevirt-test-default_testvmiqxpmw"
level=info timestamp=2018-07-26T08:33:42.536606Z pos=client.go:152 component=virt-launcher msg="Registered libvirt event notify callback"
level=info timestamp=2018-07-26T08:33:42.536834Z pos=virt-launcher.go:60 component=virt-launcher msg="Marked as ready"
level=error timestamp=2018-07-26T08:33:44.307699Z pos=manager.go:159 component=virt-launcher namespace=kubevirt-test-default name=testvmiqxpmw kind= uid=9273b16d-90ae-11e8-90e0-525500d15501 reason="virError(Code=0, Domain=0, Message='Missing error')" msg="Getting the domain failed."
level=error timestamp=2018-07-26T08:33:44.308007Z pos=server.go:68 component=virt-launcher namespace=kubevirt-test-default name=testvmiqxpmw kind= uid=9273b16d-90ae-11e8-90e0-525500d15501 reason="virError(Code=0, Domain=0, Message='Missing error')" msg="Failed to sync vmi"
level=error timestamp=2018-07-26T08:33:44.366994Z pos=common.go:126 component=virt-launcher msg="updated MAC for interface: eth0 - 0a:58:0a:ac:3d:aa"
level=info timestamp=2018-07-26T08:33:44.371320Z pos=converter.go:751 component=virt-launcher msg="Found nameservers in /etc/resolv.conf: \ufffd\ufffdBf"
level=info timestamp=2018-07-26T08:33:44.371485Z pos=converter.go:752 component=virt-launcher msg="Found search domains in /etc/resolv.conf: kubevirt-test-default.svc.cluster.local svc.cluster.local cluster.local"
level=info timestamp=2018-07-26T08:33:44.372158Z pos=dhcp.go:62 component=virt-launcher msg="Starting SingleClientDHCPServer"

• Failure [100.440 seconds]
Health Monitoring
/root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:37
  A VirtualMachineInstance with a watchdog device
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:56
    should be shut down when the watchdog expires [It]
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:57

    Unexpected Warning event received.
    Expected
      <string>: Warning
    not to equal
      <string>: Warning

    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:245
------------------------------
STEP: Starting a VirtualMachineInstance
level=info timestamp=2018-07-26T08:33:29.263004Z pos=utils.go:243 component=tests msg="Created virtual machine pod virt-launcher-testvmiqxpmw-z27dm"
level=info timestamp=2018-07-26T08:33:44.748901Z pos=utils.go:243 component=tests msg="Pod owner ship transferred to the node virt-launcher-testvmiqxpmw-z27dm"
level=error timestamp=2018-07-26T08:33:44.792242Z pos=utils.go:241 component=tests reason="unexpected warning event received" msg="server error. command Launcher.Sync failed: virError(Code=0, Domain=0, Message='Missing error')"
STEP: Expecting the VirtualMachineInstance console
STEP: Killing the watchdog device
STEP: Checking that the VirtualMachineInstance has Failed status
• [SLOW TEST:16.652 seconds]
VNC
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:46
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:54
    with VNC connection
    /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:62
      should allow accessing the VNC device
      /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:64
------------------------------
••volumedisk0 compute
------------------------------
• [SLOW TEST:55.762 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with 3 CPU cores
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:56
      should report 3 cpu cores under guest OS
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:62
------------------------------
• [SLOW TEST:18.769 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with hugepages
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108
      should consume hugepages
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        hugepages-2Mi
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
S [SKIPPING] [0.224 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with hugepages
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108
      should consume hugepages
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        hugepages-1Gi [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

        No node with hugepages hugepages-1Gi capacity
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:160
------------------------------
•
------------------------------
• [SLOW TEST:153.159 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238
    when CPU model defined
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:284
      should report defined CPU model
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:285
------------------------------
• [SLOW TEST:152.549 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238
    when CPU model equals to passthrough
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:312
      should report exactly the same model as node CPU
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:313
------------------------------
• [SLOW TEST:128.179 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238
    when CPU model not defined
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:336
      should report CPU model from libvirt capabilities
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:337
------------------------------
• [SLOW TEST:52.965 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  New VirtualMachineInstance with all supported drives
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:357
    should have all the device nodes
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:380
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.037 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  should succeed to start a vmi [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:133

  Skip Windows tests that requires PVC disk-windows
  /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.004 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  should succeed to stop a running vmi [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:139

  Skip Windows tests that requires PVC disk-windows
  /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with winrm connection [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150
    should have correct UUID
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:192

    Skip Windows tests that requires PVC disk-windows
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with winrm connection [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150
    should have pod IP
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:208

    Skip Windows tests that requires PVC disk-windows
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with kubectl command [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226
    should succeed to start a vmi
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:242

    Skip Windows tests that requires PVC disk-windows
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with kubectl command [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226
    should succeed to stop a vmi
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:250

    Skip Windows tests that requires PVC disk-windows
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1352
------------------------------
••••
------------------------------
• [SLOW TEST:158.826 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be able to reach
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    the Inbound VirtualMachineInstance
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
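For context on the watchdog failure above: that spec boots a VMI carrying an i6300esb watchdog device, stops feeding the watchdog inside the guest, and expects the VMI to reach the Failed phase; this run instead tripped on the transient Launcher.Sync warning event before the expiry check. A minimal sketch of such a VMI, assuming the v1alpha2 API and registryDisk volume schema this run uses (the name and image below are illustrative, not taken from this log), submitted through the repo's kubectl wrapper:

cat <<EOF | ./cluster/kubectl.sh create -f -
apiVersion: kubevirt.io/v1alpha2
kind: VirtualMachineInstance
metadata:
  name: vmi-watchdog-demo              # hypothetical name
spec:
  domain:
    devices:
      watchdog:
        name: mywatchdog
        i6300esb:
          action: poweroff             # power the guest off when the watchdog expires
      disks:
      - name: registrydisk
        volumeName: registryvolume
        disk:
          bus: virtio
    resources:
      requests:
        memory: 64M
  volumes:
  - name: registryvolume
    registryDisk:
      image: kubevirt/cirros-registry-disk-demo   # hypothetical image reference
EOF

The i6300esb action field accepts poweroff, reset, or shutdown; a watchdog-expiry test like the one above relies on the chosen action to make the expiry observable in the VMI phase.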
•
Pod name: disks-images-provider-gm64z
Pod phase: Running
copy all images to host mount directory
Pod name: disks-images-provider-hvkkw
Pod phase: Running
copy all images to host mount directory
Pod name: virt-api-7d79764579-mtd7c
Pod phase: Running
level=info timestamp=2018-07-26T08:50:01.528010Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-26T08:50:08.033477Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-26T08:50:08.898469Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/26 08:50:11 http: TLS handshake error from 10.129.0.1:36096: EOF
level=info timestamp=2018-07-26T08:50:14.368167Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19
level=info timestamp=2018-07-26T08:50:14.370033Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json proto=HTTP/2.0 statusCode=404 contentLength=19
level=info timestamp=2018-07-26T08:50:18.075700Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/26 08:50:21 http: TLS handshake error from 10.129.0.1:36106: EOF
level=info timestamp=2018-07-26T08:50:28.125025Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-26T08:50:31.126921Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/26 08:50:31 http: TLS handshake error from 10.129.0.1:36116: EOF
level=info timestamp=2018-07-26T08:50:31.572347Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-26T08:50:38.165343Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
level=info timestamp=2018-07-26T08:50:38.964364Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136
2018/07/26 08:50:41 http: TLS handshake error from 10.129.0.1:36126: EOF
Pod name: virt-api-7d79764579-szt65
Pod phase: Running
level=info timestamp=2018-07-26T08:48:52.009132Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/07/26 08:48:57 http: TLS handshake error from 10.128.0.1:53740: EOF
level=info timestamp=2018-07-26T08:49:01.885527Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/07/26 08:49:07 http: TLS handshake error from 10.128.0.1:53790: EOF
2018/07/26 08:49:17 http: TLS handshake error from 10.128.0.1:53838: EOF
2018/07/26 08:49:27 http: TLS handshake error from 10.128.0.1:53884: EOF
2018/07/26 08:49:37 http: TLS handshake error from 10.128.0.1:53934: EOF
2018/07/26 08:49:47 http: TLS handshake error from 10.128.0.1:53980: EOF
2018/07/26 08:49:57 http: TLS handshake error from 10.128.0.1:54026: EOF
level=info timestamp=2018-07-26T08:50:01.928654Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/07/26 08:50:07 http: TLS handshake error from 10.128.0.1:54076: EOF
2018/07/26 08:50:17 http: TLS handshake error from 10.128.0.1:54122: EOF
2018/07/26 08:50:27 http: TLS handshake error from 10.128.0.1:54168: EOF
level=info timestamp=2018-07-26T08:50:31.908163Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19
2018/07/26 08:50:37 http: TLS handshake error from 10.128.0.1:54218: EOF
Pod name: virt-controller-7d57d96b65-8jbq9
Pod phase: Running
level=info timestamp=2018-07-26T08:44:56.119423Z pos=vm.go:262 component=virt-controller service=http msg="vmi is nil"
level=info timestamp=2018-07-26T08:44:56.308820Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvm\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/default/testvm, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: 2bc0c155-90b0-11e8-90e0-525500d15501, UID in object meta: " msg="reenqueuing VirtualMachineInstance default/testvm"
level=info timestamp=2018-07-26T08:44:56.513383Z pos=vm.go:135 component=virt-controller service=http namespace=default name=testvm kind= uid=2b70d241-90b0-11e8-90e0-525500d15501 msg="Started processing VM"
level=info timestamp=2018-07-26T08:44:56.513484Z pos=vm.go:186 component=virt-controller service=http namespace=default name=testvm kind= uid=2b70d241-90b0-11e8-90e0-525500d15501 msg="Creating or the VirtualMachineInstance: false"
level=info timestamp=2018-07-26T08:44:56.513513Z pos=vm.go:262 component=virt-controller service=http msg="vmi is nil"
level=info timestamp=2018-07-26T08:44:56.920438Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7j7ff kind= uid=2cb62227-90b0-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-07-26T08:44:56.920562Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7j7ff kind= uid=2cb62227-90b0-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-07-26T08:44:56.930777Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi72fwf kind= uid=2cb76dd7-90b0-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-07-26T08:44:56.930941Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi72fwf kind= uid=2cb76dd7-90b0-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-07-26T08:44:56.950532Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-07-26T08:44:56.950652Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-07-26T08:44:56.963033Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiv9bd9 kind= uid=2cbc29d8-90b0-11e8-90e0-525500d15501 msg="Initializing VirtualMachineInstance"
level=info timestamp=2018-07-26T08:44:56.963141Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiv9bd9 kind= uid=2cbc29d8-90b0-11e8-90e0-525500d15501 msg="Marking VirtualMachineInstance as initialized"
level=info timestamp=2018-07-26T08:44:58.910626Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiv9bd9\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiv9bd9"
level=info timestamp=2018-07-26T08:44:59.307377Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmiv9bd9\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmiv9bd9"
Pod name: virt-controller-7d57d96b65-ll9d9
Pod phase: Running
level=info timestamp=2018-07-26T07:37:25.261416Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182
Pod name: virt-handler-9tv26
Pod phase: Running
level=info timestamp=2018-07-26T08:45:30.956082Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="No update processing required"
level=info timestamp=2018-07-26T08:45:29.155594Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi7j7ff kind= uid=2cb62227-90b0-11e8-90e0-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-26T08:45:31.500747Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-26T08:45:30.737838Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED"
level=info timestamp=2018-07-26T08:45:31.557174Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="No update processing required"
level=info timestamp=2018-07-26T08:45:31.956559Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED"
level=info timestamp=2018-07-26T08:45:33.055397Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmiv9bd9 kind= uid=2cbc29d8-90b0-11e8-90e0-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-26T08:45:33.055591Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmiv9bd9 kind= uid=2cbc29d8-90b0-11e8-90e0-525500d15501 msg="Processing vmi update"
level=info timestamp=2018-07-26T08:45:33.208460Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmiv9bd9 kind= uid=2cbc29d8-90b0-11e8-90e0-525500d15501 msg="Synchronization loop succeeded."
level=error timestamp=2018-07-26T08:45:33.295396Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmipnp5k\": the object has been modified; please apply your changes to the latest version and try again" msg="Updating the VirtualMachineInstance status failed."
level=info timestamp=2018-07-26T08:45:33.295486Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmipnp5k\": the object has been modified; please apply your changes to the latest version and try again" msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmipnp5k"
level=info timestamp=2018-07-26T08:45:33.295592Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Processing vmi update"
level=info timestamp=2018-07-26T08:45:33.900284Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Synchronization loop succeeded."
level=info timestamp=2018-07-26T08:45:33.900450Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Processing vmi update"
level=info timestamp=2018-07-26T08:45:33.947103Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Synchronization loop succeeded."
Pod name: virt-handler-qc5ds
Pod phase: Running
level=info timestamp=2018-07-26T08:43:56.663158Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi92spb kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-07-26T08:43:56.679994Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmi92spb kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-26T08:43:56.680330Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi92spb kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=error timestamp=2018-07-26T08:43:56.704563Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmikmpcr kind= uid=c89d47cb-90af-11e8-90e0-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmikmpcr\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmikmpcr, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: c89d47cb-90af-11e8-90e0-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed."
level=info timestamp=2018-07-26T08:43:56.705103Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmikmpcr\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmikmpcr, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: c89d47cb-90af-11e8-90e0-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmikmpcr"
level=info timestamp=2018-07-26T08:43:56.705721Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmikmpcr kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-26T08:43:56.705996Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmikmpcr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=error timestamp=2018-07-26T08:43:56.707692Z pos=vm.go:404 component=virt-handler namespace=kubevirt-test-default name=testvmijvt9w kind= uid=bd011045-90af-11e8-90e0-525500d15501 reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmijvt9w\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmijvt9w, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: bd011045-90af-11e8-90e0-525500d15501, UID in object meta: " msg="Updating the VirtualMachineInstance status failed."
level=info timestamp=2018-07-26T08:43:56.708152Z pos=vm.go:251 component=virt-handler reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmijvt9w\": StorageError: invalid object, Code: 4, Key: /kubernetes.io/kubevirt.io/virtualmachineinstances/kubevirt-test-default/testvmijvt9w, ResourceVersion: 0, AdditionalErrorMsg: Precondition failed: UID in precondition: bd011045-90af-11e8-90e0-525500d15501, UID in object meta: " msg="re-enqueuing VirtualMachineInstance kubevirt-test-default/testvmijvt9w"
level=info timestamp=2018-07-26T08:43:56.708264Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmijvt9w kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-26T08:43:56.708341Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijvt9w kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-07-26T08:43:56.711121Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmikmpcr kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-26T08:43:56.711450Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmikmpcr kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
level=info timestamp=2018-07-26T08:43:56.714061Z pos=vm.go:386 component=virt-handler namespace=kubevirt-test-default name=testvmijvt9w kind=VirtualMachineInstance uid= msg="Processing local ephemeral data cleanup for shutdown domain."
level=info timestamp=2018-07-26T08:43:56.714140Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmijvt9w kind=VirtualMachineInstance uid= msg="Synchronization loop succeeded."
Pod name: virt-launcher-testvmi72fwf-r8n7s
Pod phase: Running
level=info timestamp=2018-07-26T08:45:23.349444Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-26T08:45:24.545489Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-26T08:45:24.604013Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 3948668b-f990-4bac-bc80-c1904afbb0b9"
level=info timestamp=2018-07-26T08:45:24.606206Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-26T08:45:24.651240Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:25.383930Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-26T08:45:25.452744Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T08:45:25.494914Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi72fwf kind= uid=2cb76dd7-90b0-11e8-90e0-525500d15501 msg="Domain started."
level=info timestamp=2018-07-26T08:45:25.499564Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi72fwf kind= uid=2cb76dd7-90b0-11e8-90e0-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T08:45:25.520531Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:25.520667Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-26T08:45:25.569640Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T08:45:25.616692Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 3948668b-f990-4bac-bc80-c1904afbb0b9: 205"
level=info timestamp=2018-07-26T08:45:25.647279Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:30.762364Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi72fwf kind= uid=2cb76dd7-90b0-11e8-90e0-525500d15501 msg="Synced vmi"
Pod name: virt-launcher-testvmi7j7ff-98xrv
Pod phase: Running
level=info timestamp=2018-07-26T08:45:22.125141Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-26T08:45:22.883703Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-26T08:45:22.921907Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID e361e320-0c37-4def-b9c9-cc0789892616"
level=info timestamp=2018-07-26T08:45:22.922223Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-26T08:45:22.967112Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:23.652456Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-26T08:45:23.735123Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T08:45:23.752669Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi7j7ff kind= uid=2cb62227-90b0-11e8-90e0-525500d15501 msg="Domain started."
level=info timestamp=2018-07-26T08:45:23.750547Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:23.752864Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-26T08:45:23.755491Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi7j7ff kind= uid=2cb62227-90b0-11e8-90e0-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T08:45:23.790063Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T08:45:23.905115Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:23.940244Z pos=monitor.go:222 component=virt-launcher msg="Found PID for e361e320-0c37-4def-b9c9-cc0789892616: 194"
level=info timestamp=2018-07-26T08:45:26.862052Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi7j7ff kind= uid=2cb62227-90b0-11e8-90e0-525500d15501 msg="Synced vmi"
Pod name: virt-launcher-testvmipnp5k-xxv4g
Pod phase: Running
level=info timestamp=2018-07-26T08:45:26.785127Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-26T08:45:26.796317Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID f62f3dbf-e683-4b58-a6a7-22456e36710e"
level=info timestamp=2018-07-26T08:45:26.804092Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-26T08:45:26.986453Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:27.587079Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-26T08:45:27.674406Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T08:45:27.723095Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Domain started."
level=info timestamp=2018-07-26T08:45:27.728535Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T08:45:27.822160Z pos=monitor.go:222 component=virt-launcher msg="Found PID for f62f3dbf-e683-4b58-a6a7-22456e36710e: 203"
level=info timestamp=2018-07-26T08:45:28.111596Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:28.112044Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-26T08:45:28.169526Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T08:45:32.084178Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:33.309787Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T08:45:33.910223Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmipnp5k kind= uid=2cb9a26b-90b0-11e8-90e0-525500d15501 msg="Synced vmi"
Pod name: virt-launcher-testvmiv9bd9-5w2jx
Pod phase: Running
level=info timestamp=2018-07-26T08:45:23.402503Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-26T08:45:25.002370Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-26T08:45:25.020783Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 41501423-cd85-4f8d-aadf-12650972cf83"
level=info timestamp=2018-07-26T08:45:25.021090Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-26T08:45:25.123748Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:26.037598Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 41501423-cd85-4f8d-aadf-12650972cf83: 196"
level=info timestamp=2018-07-26T08:45:26.086044Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-26T08:45:26.177659Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T08:45:26.207367Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmiv9bd9 kind= uid=2cbc29d8-90b0-11e8-90e0-525500d15501 msg="Domain started."
level=info timestamp=2018-07-26T08:45:26.211725Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiv9bd9 kind= uid=2cbc29d8-90b0-11e8-90e0-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-26T08:45:27.987690Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:27.988913Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-26T08:45:28.040527Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-26T08:45:31.989540Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-26T08:45:33.090253Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiv9bd9 kind= uid=2cbc29d8-90b0-11e8-90e0-525500d15501 msg="Synced vmi"
------------------------------
• Failure [187.172 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be able to reach
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    the Inbound VirtualMachineInstance with custom MAC address [It]
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

    Expected error:
      : 180000000000 expect: timer expired after 180 seconds
    not to have occurred

    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:224
------------------------------
STEP: checking br1 MTU inside the pod
level=info timestamp=2018-07-26T08:47:38.633647Z pos=vmi_networking_test.go:185 component=tests msg="4: br1: mtu 1450 qdisc noqueue state UP group default \n link/ether 0a:58:0a:22:96:a0 brd ff:ff:ff:ff:ff:ff\n inet 169.254.75.86/32 brd 169.254.75.86 scope global br1\n valid_lft forever preferred_lft forever\n inet6 fe80::858:aff:fe22:96a0/64 scope link \n valid_lft forever preferred_lft forever\n"
STEP: checking eth0 MTU inside the VirtualMachineInstance
level=info timestamp=2018-07-26T08:47:39.141549Z pos=vmi_networking_test.go:205 component=tests msg="[{1 \r\n$ [$ ]} {3 ip address show eth0\r\n2: eth0: mtu 1450 qdisc pfifo_fast qlen 1000\r\n [2: eth0: mtu 1450 qdisc pfifo_fast qlen 1000\r\n]} {5 link/ether 0 [0]}]"
STEP: checking the VirtualMachineInstance can send MTU sized frames to another VirtualMachineInstance
level=info timestamp=2018-07-26T08:50:44.647577Z pos=utils.go:1200 component=tests namespace=kubevirt-test-default name=testvmi72fwf kind=VirtualMachineInstance uid=2cb76dd7-90b0-11e8-90e0-525500d15501 msg="[{1 \r\n\r\n$ [$ ]} {3 ping 10.129.0.124 -c 1 -w 5 -s 1422\r\nPING 10.129.0.124 (10.129.0.124): 1422 data bytes\r\n\r\n--- 10.129.0.124 ping statistics ---\r\n5 packets transmitted, 0 packets received, 100% packet loss\r\n$ [$ ]} {5 echo $?\r\n1\r\n$ []}]"
•
------------------------------
• [SLOW TEST:5.112 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be reachable via the propagated IP from a Pod
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    on the same node from Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:5.082 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be reachable via the propagated IP from a Pod
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    on a different node from Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
••
------------------------------
• [SLOW TEST:5.925 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  with a service matching the vmi exposed
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:283
    should be able to reach the vmi based on labels specified on the vmi
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:303
------------------------------
•
------------------------------
• [SLOW TEST:5.274 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  with a subdomain and a headless service given
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:330
    should be able to reach the vmi via its unique fully qualified domain name
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:353
------------------------------
• [SLOW TEST:59.012 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom interface model
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:379
    should expose the right device type to the guest
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:380
------------------------------
•
------------------------------
• [SLOW TEST:55.641 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom MAC address
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:413
    should configure custom MAC address
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:414
------------------------------
• [SLOW TEST:62.597 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom MAC address in non-conventional format
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:425
    should configure custom MAC address
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:426
------------------------------
• [SLOW TEST:59.135 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom MAC address and slirp interface
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:438
    should configure custom MAC address
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:439
------------------------------
• [SLOW TEST:63.728 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with disabled automatic attachment of interfaces
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:451
    should not configure any external interfaces
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:452
------------------------------
• [SLOW TEST:37.950 seconds]
LeaderElection
/root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:43
  Start a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:53
    when the controller pod is not running
    /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:54
      should success
      /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:55
------------------------------
Waiting for namespace kubevirt-test-default to be removed, this can take a while ...
Waiting for namespace kubevirt-test-alternative to be removed, this can take a while ...
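For context on the custom-MAC networking failure earlier in this run: that spec pins the MAC address of the inbound VMI's bridge interface and expects MTU-sized pings from a second VMI to get through; here every packet was lost and the console expecter timed out after 180 seconds. A minimal sketch of a VMI with a pinned MAC, again assuming the v1alpha2 interface/network schema of this run (the name, MAC value, and image are illustrative, not taken from this log):

cat <<EOF | ./cluster/kubectl.sh create -f -
apiVersion: kubevirt.io/v1alpha2
kind: VirtualMachineInstance
metadata:
  name: vmi-custom-mac-demo            # hypothetical name
spec:
  domain:
    devices:
      interfaces:
      - name: default
        bridge: {}
        macAddress: de:ad:00:00:be:af  # illustrative fixed MAC, the field the failing spec exercises
      disks:
      - name: registrydisk
        volumeName: registryvolume
        disk:
          bus: virtio
    resources:
      requests:
        memory: 64M
  networks:
  - name: default
    pod: {}
  volumes:
  - name: registryvolume
    registryDisk:
      image: kubevirt/cirros-registry-disk-demo  # hypothetical image reference
EOF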
Summarizing 3 Failures:

[Fail] VMIlifecycle Creating a VirtualMachineInstance when virt-handler is not responsive [BeforeEach] the node controller should react
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:245

[Fail] Health Monitoring A VirtualMachineInstance with a watchdog device [It] should be shut down when the watchdog expires
/root/go/src/kubevirt.io/kubevirt/tests/utils.go:245

[Fail] Networking should be able to reach [It] the Inbound VirtualMachineInstance with custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:224

Ran 136 of 148 Specs in 4706.428 seconds
FAIL! -- 133 Passed | 3 Failed | 0 Pending | 12 Skipped
--- FAIL: TestTests (4706.46s)
FAIL
make: *** [functest] Error 1
+ make cluster-down
./cluster/down.sh
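When iterating on failures like these three, it is usually cheaper to rerun only the failing specs than the full 136-spec, 78-minute suite. A sketch of a focused rerun, assuming the FUNC_TEST_ARGS pass-through that the KubeVirt Makefile of this era exposes for forwarding Ginkgo flags (verify the exact variable name against the repo's Makefile before relying on it):

# bring the same provider back up, then run only specs matching the focus regex
export KUBEVIRT_PROVIDER=os-3.10.0
export KUBEVIRT_NUM_NODES=2
make cluster-up
FUNC_TEST_ARGS='--ginkgo.focus=watchdog|custom MAC' make functest
make cluster-down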