+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release + WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release + [[ openshift-3.10-release =~ openshift-.* ]] + [[ openshift-3.10-release =~ .*-crio-.* ]] + export KUBEVIRT_PROVIDER=os-3.10.0 + KUBEVIRT_PROVIDER=os-3.10.0 + export KUBEVIRT_NUM_NODES=2 + KUBEVIRT_NUM_NODES=2 + export NFS_WINDOWS_DIR=/home/nfs/images/windows2016 + NFS_WINDOWS_DIR=/home/nfs/images/windows2016 + export NAMESPACE=kube-system + NAMESPACE=kube-system + trap '{ make cluster-down; }' EXIT SIGINT SIGTERM SIGSTOP + make cluster-down ./cluster/down.sh Unable to find image 'kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' locally Trying to pull repository docker.io/kubevirtci/gocli ... sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a: Pulling from docker.io/kubevirtci/gocli ca1df8c2ad92: Pulling fs layer bd32923567b2: Pulling fs layer ca1df8c2ad92: Verifying Checksum ca1df8c2ad92: Download complete bd32923567b2: Verifying Checksum bd32923567b2: Download complete ca1df8c2ad92: Pull complete bd32923567b2: Pull complete Digest: sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a Status: Downloaded newer image for docker.io/kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a + make cluster-up ./cluster/up.sh Downloading ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
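A minimal sketch of the job wrapper this trace follows. The exports, the cleanup trap, and the make cluster-down/cluster-up targets are taken from the trace above; the function name and shell options are illustrative, not the job's actual script.

#!/bin/bash
set -euo pipefail

# Provider and sizing picked up from the job name, as in the trace above.
export KUBEVIRT_PROVIDER=os-3.10.0
export KUBEVIRT_NUM_NODES=2
export NAMESPACE=kube-system

cleanup() {
    # Always tear the ephemeral cluster down, even if provisioning fails.
    make cluster-down
}
# Note: SIGSTOP cannot be caught, so listing it in a trap (as the trace does)
# has no effect; EXIT, SIGINT and SIGTERM are the ones that matter.
trap cleanup EXIT SIGINT SIGTERM

make cluster-down   # remove any leftover cluster from a previous run
make cluster-up     # provision the os-3.10.0 provider via kubevirtci/gocli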
.................................................................................................................................................................................................................................................................................................................................................. Downloading ....... 2018/07/18 11:41:35 Waiting for host: 192.168.66.102:22 2018/07/18 11:41:38 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s 2018/07/18 11:41:46 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s 2018/07/18 11:41:54 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s 2018/07/18 11:41:59 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: connection refused. Sleeping 5s 2018/07/18 11:42:04 Connected to tcp://192.168.66.102:22 + systemctl stop origin-node.service + rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/ ++ docker ps -q + containers= + '[' -n '' ']' ++ docker ps -q -a + containers='2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3' + '[' -n '2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3' ']' + docker rm -f 2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3 2cfbef31c987 e183c40c07dc 861f604efed4 12902ad26342 028539b1f68b bd6f07c1906c d1f95a33a226 c43f96b6da26 e007e5cfd226 b42e2bceca6e 00531aec6f9a e4ad39ba6cef 504c3df6bbf4 eb1ec0b445ce b8955b91e8e5 f739ed8f3e59 07668d85ab3a a6045d125d7b 2ce17110e009 b45f64ab28ef 3a15945be9e1 2a0af99ae1d1 0ece927846d7 0202d5f5dfae 8ce743769d8f 2efb36567bd8 96b65c0493c5 e9ce89fa30e3 2018/07/18 11:42:07 Waiting for host: 192.168.66.101:22 2018/07/18 11:42:10 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s 2018/07/18 11:42:18 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s 2018/07/18 11:42:26 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s 2018/07/18 11:42:34 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s 2018/07/18 11:42:39 Connected to tcp://192.168.66.101:22 + inventory_file=/root/inventory + openshift_ansible=/root/openshift-ansible + echo '[new_nodes]' + sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory + nodes_found=false ++ seq 2 100 + for i in '$(seq 2 100)' ++ printf node%02d 2 + node=node02 ++ printf 192.168.66.1%02d 2 + node_ip=192.168.66.102 + set +e + ping 192.168.66.102 -c 1 PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data. 
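The provisioning step whose trace begins above discovers additional nodes by probing sequential IPs and appending each reachable one to the inventory's new_nodes group, stopping at the first unreachable address. A condensed sketch of that loop: the inventory path, group name, and host-var line are copied from the trace; the redirection targets (inventory file, /etc/hosts) are assumptions, since the trace only shows the bare echo commands.

inventory_file=/root/inventory
echo '[new_nodes]' >> "$inventory_file"
sed -i '/\[OSEv3:children\]/a new_nodes' "$inventory_file"

for i in $(seq 2 100); do
    node=$(printf 'node%02d' "$i")
    node_ip=$(printf '192.168.66.1%02d' "$i")
    # Probe the candidate address; the first unreachable IP ends the scan.
    if ping -c 1 "$node_ip" > /dev/null 2>&1; then
        echo "Found $node. Adding it to the inventory."
        echo "$node_ip $node" >> /etc/hosts   # destination assumed
        echo "$node openshift_node_group_name=\"node-config-compute\" openshift_schedulable=true openshift_ip=$node_ip" >> "$inventory_file"
    else
        break
    fi
done
# With the inventory populated, the openshift-node scaleup playbook is run,
# as the trace below shows.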
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=0.892 ms --- 192.168.66.102 ping statistics --- 1 packets transmitted, 1 received, 0% packet loss, time 0ms rtt min/avg/max/mdev = 0.892/0.892/0.892/0.000 ms Found node02. Adding it to the inventory. + '[' 0 -ne 0 ']' + nodes_found=true + set -e + echo '192.168.66.102 node02' + echo 'Found node02. Adding it to the inventory.' + echo 'node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102' + for i in '$(seq 2 100)' ++ printf node%02d 3 + node=node03 ++ printf 192.168.66.1%02d 3 + node_ip=192.168.66.103 + set +e + ping 192.168.66.103 -c 1 PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data. From 192.168.66.101 icmp_seq=1 Destination Host Unreachable --- 192.168.66.103 ping statistics --- 1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms + '[' 1 -ne 0 ']' + break + '[' true = true ']' + ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml PLAY [Populate config host groups] ********************************************* TASK [Load group name mapping variables] *************************************** ok: [localhost] TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] ********* skipping: [localhost] TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_lb_hosts required] *********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts required] ********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts is single host] **************************** skipping: [localhost] TASK [Evaluate groups - g_glusterfs_hosts required] **************************** skipping: [localhost] TASK [Evaluate oo_all_hosts] *************************************************** ok: [localhost] => (item=node01) ok: [localhost] => (item=node02) TASK [Evaluate oo_masters] ***************************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_master] ************************************************ ok: [localhost] TASK [Evaluate oo_new_etcd_to_config] ****************************************** TASK [Evaluate oo_masters_to_config] ******************************************* ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_to_config] ********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_etcd] ************************************************** ok: [localhost] TASK [Evaluate oo_etcd_hosts_to_upgrade] *************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_hosts_to_backup] **************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_nodes_to_config] ********************************************* ok: [localhost] => (item=node02) TASK [Evaluate oo_nodes_to_bootstrap] ****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** TASK [Evaluate 
oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) PLAY [Ensure there are new_nodes] ********************************************** TASK [fail] ******************************************************************** skipping: [localhost] TASK [fail] ******************************************************************** skipping: [localhost] PLAY [Initialization Checkpoint Start] ***************************************** TASK [Set install initialization 'In Progress'] ******************************** ok: [node01] PLAY [Populate config host groups] ********************************************* TASK [Load group name mapping variables] *************************************** ok: [localhost] TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] ********* skipping: [localhost] TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] ************* skipping: [localhost] TASK [Evaluate groups - g_lb_hosts required] *********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts required] ********************************** skipping: [localhost] TASK [Evaluate groups - g_nfs_hosts is single host] **************************** skipping: [localhost] TASK [Evaluate groups - g_glusterfs_hosts required] **************************** skipping: [localhost] TASK [Evaluate oo_all_hosts] *************************************************** ok: [localhost] => (item=node01) ok: [localhost] => (item=node02) TASK [Evaluate oo_masters] ***************************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_master] ************************************************ ok: [localhost] TASK [Evaluate oo_new_etcd_to_config] ****************************************** TASK [Evaluate oo_masters_to_config] ******************************************* ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_to_config] ********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_first_etcd] ************************************************** ok: [localhost] TASK [Evaluate oo_etcd_hosts_to_upgrade] *************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_etcd_hosts_to_backup] **************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_nodes_to_config] ********************************************* ok: [localhost] => (item=node02) TASK [Evaluate oo_nodes_to_bootstrap] ****************************************** ok: [localhost] => (item=node02) TASK [Add masters to oo_nodes_to_bootstrap] ************************************ ok: [localhost] => (item=node01) TASK [Evaluate oo_lb_to_config] ************************************************ TASK [Evaluate oo_nfs_to_config] *********************************************** ok: [localhost] => (item=node01) TASK [Evaluate oo_glusterfs_to_config] ***************************************** TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) [WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config PLAY [Ensure that all non-node hosts are accessible] *************************** TASK [Gathering Facts] ********************************************************* ok: [node01] PLAY [Initialize basic host facts] ********************************************* TASK [Gathering Facts] 
********************************************************* ok: [node02] ok: [node01] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02 TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] *** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : debug] ************************************ skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_stats] ******************************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] *** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02 included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02 TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Standardize on latest variable names] ***** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Normalize openshift_release] ************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02 TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* skipping: [node01] => (item=openshift_hosted_etcd_storage_kind) skipping: [node02] => (item=openshift_hosted_etcd_storage_kind) TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : ensure provider configuration variables are defined] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure 
removed web console extension variables are not set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that web console port matches API server port] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : At least one master is schedulable] ******* skipping: [node01] skipping: [node02] TASK [Detecting Operating System from ostree_booted] *************************** ok: [node02] ok: [node01] TASK [set openshift_deployment_type if unset] ********************************** skipping: [node01] skipping: [node02] TASK [check for node already bootstrapped] ************************************* ok: [node02] ok: [node01] TASK [initialize_facts set fact openshift_is_bootstrapped] ********************* ok: [node01] ok: [node02] TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] *** ok: [node01] ok: [node02] TASK [Determine Atomic Host Docker Version] ************************************ skipping: [node01] skipping: [node02] TASK [assert atomic host docker version is 1.12 or later] ********************** skipping: [node01] skipping: [node02] PLAY [Retrieve existing master configs and validate] *************************** TASK [openshift_control_plane : stat] ****************************************** ok: [node01] TASK [openshift_control_plane : slurp] ***************************************** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] *** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Initialize special first-master variables] ******************************* TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Disable web console if required] ***************************************** TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Setup yum repositories for all hosts] ************************************ TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Install Red Hat Subscription manager] ******************* skipping: [node02] TASK [rhel_subscribe : Is host already registered?] 
**************************** skipping: [node02] TASK [rhel_subscribe : Register host] ****************************************** skipping: [node02] TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] *********** skipping: [node02] TASK [rhel_subscribe : Attach to OpenShift Pool] ******************************* skipping: [node02] TASK [rhel_subscribe : Satellite preparation] ********************************** skipping: [node02] TASK [openshift_repos : openshift_repos detect ostree] ************************* ok: [node02] TASK [openshift_repos : Ensure libselinux-python is installed] ***************** ok: [node02] TASK [openshift_repos : Remove openshift_additional.repo file] ***************** ok: [node02] TASK [openshift_repos : Create any additional repos that are defined] ********** TASK [openshift_repos : include_tasks] ***************************************** skipping: [node02] TASK [openshift_repos : include_tasks] ***************************************** included: /root/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02 TASK [openshift_repos : Configure origin gpg keys] ***************************** ok: [node02] TASK [openshift_repos : Configure correct origin release repository] *********** ok: [node02] => (item=/root/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2) TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] *** changed: [node02] => { "msg": "First run of openshift_repos" } TASK [openshift_repos : Record that openshift_repos already ran] *************** ok: [node02] RUNNING HANDLER [openshift_repos : refresh cache] ****************************** changed: [node02] PLAY [Install packages necessary for installer] ******************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Determine if chrony is installed] **************************************** [WARNING]: Consider using the yum, dnf or zypper module rather than running rpm. If you need to use command because yum, dnf or zypper is insufficient you can add warn=False to this command task or set command_warnings=False in ansible.cfg to get rid of this message. 
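The rpm warning above is emitted because the installer probes for chrony with a bare command rather than a package module. In shell terms, that check plus the ntp/chronyd tasks that follow reduce to roughly the logic below; this is an illustrative equivalent, not the role's actual implementation.

# Prefer an already-installed chrony; otherwise fall back to ntpd.
if rpm -q chrony > /dev/null 2>&1; then
    systemctl enable --now chronyd
else
    yum install -y ntp
    systemctl enable --now ntpd
fi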
changed: [node02] TASK [Install ntp package] ***************************************************** skipping: [node02] TASK [Start and enable ntpd/chronyd] ******************************************* changed: [node02] TASK [Ensure openshift-ansible installer package deps are installed] *********** ok: [node02] => (item=iproute) ok: [node02] => (item=dbus-python) ok: [node02] => (item=PyYAML) ok: [node02] => (item=python-ipaddress) ok: [node02] => (item=libsemanage-python) ok: [node02] => (item=yum-utils) ok: [node02] => (item=python-docker) PLAY [Initialize cluster facts] ************************************************ TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [get openshift_current_version] ******************************************* ok: [node02] ok: [node01] TASK [set_fact openshift_portal_net if present on masters] ********************* ok: [node01] ok: [node02] TASK [Gather Cluster facts] **************************************************** changed: [node02] changed: [node01] TASK [Set fact of no_proxy_internal_hostnames] ********************************* skipping: [node01] skipping: [node02] TASK [Initialize openshift.node.sdn_mtu] *************************************** changed: [node02] ok: [node01] PLAY [Initialize etcd host variables] ****************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role : openshift_version] **************************************** TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** ok: [node01] TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] => { "msg": "openshift_pkg_version was not defined. 
Falling back to -3.10.0" } TASK [openshift_version : set_fact] ******************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Verify Requirements] ***************************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [Run variable sanity checks] ********************************************** ok: [node01] TASK [Validate openshift_node_groups and openshift_node_group_name] ************ ok: [node01] PLAY [Initialization Checkpoint End] ******************************************* TASK [Set install initialization 'Complete'] *********************************** ok: [node01] PLAY [Validate node hostnames] ************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Query DNS for IP address of node02] ************************************** ok: [node02] TASK [Validate openshift_hostname when defined] ******************************** skipping: [node02] TASK [Validate openshift_ip exists on node when defined] *********************** skipping: [node02] PLAY [Configure os_firewall] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [os_firewall : Detecting Atomic Host Operating System] ******************** ok: [node02] TASK [os_firewall : Set fact r_os_firewall_is_atomic] ************************** ok: [node02] TASK [os_firewall : Fail - Firewalld is not supported on Atomic Host] ********** skipping: [node02] TASK [os_firewall : Install firewalld packages] ******************************** skipping: [node02] TASK [os_firewall : Ensure iptables services are not enabled] ****************** skipping: [node02] => (item=iptables) skipping: [node02] => (item=ip6tables) TASK [os_firewall : Wait 10 seconds after disabling iptables] ****************** skipping: [node02] TASK [os_firewall : Start and enable firewalld service] ************************ skipping: [node02] TASK 
[os_firewall : need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail] *** skipping: [node02] TASK [os_firewall : Restart polkitd] ******************************************* skipping: [node02] TASK [os_firewall : Wait for polkit action to have been created] *************** skipping: [node02] TASK [os_firewall : Ensure firewalld service is not enabled] ******************* ok: [node02] TASK [os_firewall : Wait 10 seconds after disabling firewalld] ***************** skipping: [node02] TASK [os_firewall : Install iptables packages] ********************************* ok: [node02] => (item=iptables) ok: [node02] => (item=iptables-services) TASK [os_firewall : Start and enable iptables service] ************************* ok: [node02 -> node02] => (item=node02) TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] *** skipping: [node02] PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [container_runtime : Setup the docker-storage for overlay] **************** skipping: [node02] TASK [container_runtime : Create file system on extra volume device] *********** TASK [container_runtime : Create mount entry for extra volume] ***************** PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** ok: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* ok: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ****************** skipping: [node02] TASK [container_runtime : Add https_proxy to /etc/atomic.conf] ***************** skipping: [node02] TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ******************** skipping: [node02] TASK [container_runtime : Get current installed Docker version] **************** ok: [node02] TASK [container_runtime : Error out if Docker pre-installed but too old] ******* skipping: [node02] TASK [container_runtime : Error out if requested Docker is too old] ************ skipping: [node02] TASK [container_runtime : Install Docker] ************************************** skipping: [node02] TASK [container_runtime : Ensure 
docker.service.d directory exists] ************ ok: [node02] TASK [container_runtime : Configure Docker service unit file] ****************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Set registry params] ********************************* skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []}) TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] *** skipping: [node02] TASK [container_runtime : Set Proxy Settings] ********************************** skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''}) TASK [container_runtime : Set various Docker options] ************************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Configure Docker Network OPTIONS] ******************** ok: [node02] TASK [container_runtime : Detect if docker is already started] ***************** ok: [node02] TASK [container_runtime : Start the Docker service] **************************** ok: [node02] TASK [container_runtime : set_fact] ******************************************** ok: [node02] TASK [container_runtime : Check for docker_storage_path/overlay2] ************** ok: [node02] TASK [container_runtime : Fixup SELinux permissions for docker] **************** changed: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* ok: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** ok: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** ok: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] TASK [container_runtime : Fail if Atomic Host since this is an rpm request] **** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create 
systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Check that overlay is in the kernel] ***************** skipping: [node02] TASK [container_runtime : Add overlay to modprobe.d] *************************** skipping: [node02] TASK [container_runtime : Manually modprobe overlay into the kernel] *********** skipping: [node02] TASK [container_runtime : Enable and start systemd-modules-load] *************** skipping: [node02] TASK [container_runtime : Install cri-o] *************************************** skipping: [node02] TASK [container_runtime : Remove CRI-O default configuration files] ************ skipping: [node02] => (item=/etc/cni/net.d/200-loopback.conf) skipping: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf) TASK [container_runtime : Create the CRI-O configuration] ********************** skipping: [node02] TASK [container_runtime : Ensure CNI configuration directory exists] *********** skipping: [node02] TASK [container_runtime : Add iptables allow rules] **************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove iptables rules] ******************************* TASK [container_runtime : Add firewalld allow rules] *************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove firewalld allow rules] ************************ TASK [container_runtime : Configure the CNI network] *************************** skipping: [node02] TASK [container_runtime : Create /etc/sysconfig/crio-network] ****************** skipping: [node02] TASK [container_runtime : Start the CRI-O service] ***************************** skipping: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* skipping: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** skipping: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** skipping: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role : openshift_version] **************************************** TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** skipping: [node01] 
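With every cri-o task skipped (this is not a -crio- job, per the `[[ ... =~ .*-crio-.* ]]` check at the top of the trace), the container_runtime work on node02 reduces to configuring and starting Docker. Roughly, in shell terms; this is a sketch only, and the drop-in directory, relabel targets, and service handling stand in for the role's templated unit file and /etc/sysconfig/docker option handling, which are not shown here.

mkdir -p /etc/systemd/system/docker.service.d       # "Ensure docker.service.d directory exists"
mkdir -p /var/lib/containers                        # extra storage location the role prepares
restorecon -R /var/lib/docker /var/lib/containers   # the SELinux fix-ups reported above
systemctl daemon-reload
systemctl enable --now docker                       # docker was already running, so this is a no-op here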
TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Node Preparation Checkpoint Start] *************************************** TASK [Set Node preparation 'In Progress'] ************************************** ok: [node01] PLAY [Only target nodes that have not yet been bootstrapped] ******************* TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [add_host] **************************************************************** skipping: [localhost] => (item=node02) ok: [localhost] => (item=node01) PLAY [Disable excluders] ******************************************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02 TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-docker-excluder version detected] 
************ skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-docker-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-excluder version detected] ******************* skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** skipping: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] PLAY [Configure nodes] ********************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_cloud_provider : Set cloud provider facts] ********************* skipping: [node02] TASK [openshift_cloud_provider : Create cloudprovider config dir] ************** skipping: [node02] TASK [openshift_cloud_provider : include the defined cloud provider files] ***** skipping: [node02] TASK [openshift_node : fail] *************************************************** skipping: [node02] TASK [openshift_node : Check for NetworkManager service] *********************** ok: [node02] TASK [openshift_node : Set fact using_network_manager] ************************* ok: [node02] TASK [openshift_node : Install dnsmasq] **************************************** ok: [node02] TASK [openshift_node : ensure origin/node directory exists] ******************** changed: [node02] => (item=/etc/origin) 
changed: [node02] => (item=/etc/origin/node) TASK [openshift_node : Install NetworkManager during node_bootstrap provisioning] *** skipping: [node02] TASK [openshift_node : Install network manager dispatch script] **************** skipping: [node02] TASK [openshift_node : Install dnsmasq configuration] ************************** ok: [node02] TASK [openshift_node : Deploy additional dnsmasq.conf] ************************* skipping: [node02] TASK [openshift_node : Enable dnsmasq] ***************************************** ok: [node02] TASK [openshift_node : Install network manager dispatch script] **************** ok: [node02] TASK [openshift_node : Add iptables allow rules] ******************************* ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) ok: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove iptables rules] ********************************** TASK [openshift_node : Add firewalld allow rules] ****************************** skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) skipping: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove firewalld allow rules] *************************** TASK [openshift_node : Checking for journald.conf] ***************************** ok: [node02] TASK [openshift_node : Create journald persistence directories] **************** ok: [node02] TASK [openshift_node : Update journald setup] ********************************** ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'}) ok: [node02] => (item={u'var': u'Compress', u'val': True}) ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000}) ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'}) ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'}) ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'}) ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'}) ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'}) ok: [node02] => (item={u'var': u'ForwardToSyslog', 
u'val': False}) ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False}) TASK [openshift_node : Restart journald] *************************************** skipping: [node02] TASK [openshift_node : Disable swap] ******************************************* ok: [node02] TASK [openshift_node : Install node, clients, and conntrack packages] ********** ok: [node02] => (item={u'name': u'origin-node-3.10.0*'}) ok: [node02] => (item={u'name': u'origin-clients-3.10.0*'}) ok: [node02] => (item={u'name': u'conntrack-tools'}) TASK [openshift_node : Restart cri-o] ****************************************** skipping: [node02] TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] *** changed: [node02] TASK [openshift_node : sysctl] ************************************************* ok: [node02] TASK [openshift_node : Check for credentials file for registry auth] *********** skipping: [node02] TASK [openshift_node : Create credentials for registry auth] ******************* skipping: [node02] TASK [openshift_node : Create credentials for registry auth (alternative)] ***** skipping: [node02] TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] *** skipping: [node02] TASK [openshift_node : Check that node image is present] *********************** changed: [node02] TASK [openshift_node : Pre-pull node image] ************************************ skipping: [node02] TASK [openshift_node : Copy node script to the node] *************************** ok: [node02] TASK [openshift_node : Install Node service file] ****************************** ok: [node02] TASK [openshift_node : Ensure old system path is set] ************************** skipping: [node02] => (item=/etc/origin/openvswitch) skipping: [node02] => (item=/var/lib/kubelet) skipping: [node02] => (item=/opt/cni/bin) TASK [openshift_node : Check status of node image pre-pull] ******************** skipping: [node02] TASK [openshift_node : Copy node container image to ostree storage] ************ skipping: [node02] TASK [openshift_node : Install or Update node system container] **************** skipping: [node02] TASK [openshift_node : Restart network manager to ensure networking configuration is in place] *** skipping: [node02] TASK [openshift_node : Configure Node settings] ******************************** ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS='}) ok: [node02] => (item={u'regex': u'^DEBUG_LOGLEVEL=', u'line': u'DEBUG_LOGLEVEL=2'}) ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.10.0-rc.0'}) TASK [openshift_node : Configure Proxy Settings] ******************************* skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='}) skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='}) skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'}) TASK [openshift_node : file] *************************************************** skipping: [node02] TASK [openshift_node : Create the Node config] ********************************* changed: [node02] TASK [openshift_node : Configure Node Environment Variables] ******************* TASK [openshift_node : Ensure the node static pod directory exists] ************ changed: [node02] TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : Check status of node image pre-pull] 
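The journald values applied a few tasks back correspond to a configuration like the one below. Writing them as a drop-in file (and its name) is an assumption; the role may edit /etc/systemd/journald.conf in place, but the key/value pairs are the ones reported in the task output.

mkdir -p /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/50-openshift.conf <<'EOF'
[Journal]
Storage=persistent
Compress=yes
SyncIntervalSec=1s
RateLimitInterval=1s
RateLimitBurst=10000
SystemMaxUse=8G
SystemKeepFree=20%
SystemMaxFileSize=10M
MaxRetentionSec=1month
MaxFileSec=1day
ForwardToSyslog=no
ForwardToWall=no
EOF
# Only needed when values actually change; in this run the restart task was
# skipped because the settings were already in place.
systemctl restart systemd-journald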
******************** skipping: [node02] TASK [openshift_node : Install NFS storage plugin dependencies] **************** ok: [node02] TASK [openshift_node : Check for existence of nfs sebooleans] ****************** ok: [node02] => (item=virt_use_nfs) ok: [node02] => (item=virt_sandbox_use_nfs) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-18 11:50:35.762720', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.036702', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-18 11:50:35.726018', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-18 11:50:36.932335', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.016812', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-18 11:50:36.915523', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-18 11:50:35.762720', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.036702', '_ansible_item_label': u'virt_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-18 11:50:35.726018', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-18 11:50:36.932335', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.016812', '_ansible_item_label': u'virt_sandbox_use_nfs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-18 
11:50:36.915523', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install GlusterFS storage plugin dependencies] ********** ok: [node02] TASK [openshift_node : Check for existence of fusefs sebooleans] *************** ok: [node02] => (item=virt_use_fusefs) ok: [node02] => (item=virt_sandbox_use_fusefs) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-18 11:50:43.650272', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.016723', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-18 11:50:43.633549', '_ansible_ignore_errors': None, 'failed': False}) ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-18 11:50:44.951990', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.016661', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-18 11:50:44.935329', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-18 11:50:43.650272', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.016723', '_ansible_item_label': u'virt_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'argv': None, u'creates': None, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-18 11:50:43.633549', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-18 11:50:44.951990', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.016661', '_ansible_item_label': u'virt_sandbox_use_fusefs', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'warn': True, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'argv': None, u'creates': None, 
u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-18 11:50:44.935329', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install Ceph storage plugin dependencies] *************** ok: [node02] TASK [openshift_node : Install iSCSI storage plugin dependencies] ************** ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=device-mapper-multipath) TASK [openshift_node : restart services] *************************************** ok: [node02] => (item=multipathd) ok: [node02] => (item=rpcbind) ok: [node02] => (item=iscsid) TASK [openshift_node : Template multipath configuration] *********************** changed: [node02] TASK [openshift_node : Enable and start multipath] ***************************** changed: [node02] TASK [tuned : Check for tuned package] ***************************************** ok: [node02] TASK [tuned : Set tuned OpenShift variables] *********************************** ok: [node02] TASK [tuned : Ensure directory structure exists] ******************************* ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': 
u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Ensure files are populated from templates] *********************** skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) skipping: [node02] => (item={'serole': 'object_r', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'setype': 'admin_home_t', 'state': 'directory', 'gid': 0, 'mode': '0755', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'ctime': 1531032437.8490183}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1531032437.8490183, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1531032437.8490183, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': u's0', 'seuser': u'unconfined_u', 'serole': u'object_r', 'ctime': 1531032437.8490183, 'state': u'file', 'gid': 0, 'mode': u'0644', 'mtime': 1531032437.8490183, 'owner': u'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': u'admin_home_t'}) TASK [tuned : Make tuned use 
the recommended tuned profile on restart] ********* changed: [node02] => (item=/etc/tuned/active_profile) changed: [node02] => (item=/etc/tuned/profile_mode) TASK [tuned : Restart tuned service] ******************************************* changed: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] ******* ok: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] *** PLAY [node bootstrap config] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_node : install needed rpm(s)] ********************************** ok: [node02] => (item=origin-node) ok: [node02] => (item=origin-docker-excluder) ok: [node02] => (item=ansible) ok: [node02] => (item=bash-completion) ok: [node02] => (item=docker) ok: [node02] => (item=haproxy) ok: [node02] => (item=dnsmasq) ok: [node02] => (item=ntp) ok: [node02] => (item=logrotate) ok: [node02] => (item=httpd-tools) ok: [node02] => (item=bind-utils) ok: [node02] => (item=firewalld) ok: [node02] => (item=libselinux-python) ok: [node02] => (item=conntrack-tools) ok: [node02] => (item=openssl) ok: [node02] => (item=iproute) ok: [node02] => (item=python-dbus) ok: [node02] => (item=PyYAML) ok: [node02] => (item=yum-utils) ok: [node02] => (item=glusterfs-fuse) ok: [node02] => (item=device-mapper-multipath) ok: [node02] => (item=nfs-utils) ok: [node02] => (item=cockpit-ws) ok: [node02] => (item=cockpit-system) ok: [node02] => (item=cockpit-bridge) ok: [node02] => (item=cockpit-docker) ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=ceph-common) TASK [openshift_node : create the directory for node] ************************** skipping: [node02] TASK [openshift_node : laydown systemd override] ******************************* skipping: [node02] TASK [openshift_node : update the sysconfig to have necessary variables] ******* ok: [node02] => (item={u'regexp': u'^KUBECONFIG=.*', u'line': u'KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig'}) TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : disable origin-node service] **************************** changed: [node02] => (item=origin-node.service) TASK [openshift_node : Check for RPM generated config marker file .config_managed] *** ok: [node02] TASK [openshift_node : create directories for bootstrapping] ******************* ok: [node02] => (item=/root/openshift_bootstrap) changed: [node02] => (item=/var/lib/origin/openshift.local.config) changed: [node02] => (item=/var/lib/origin/openshift.local.config/node) ok: [node02] => (item=/etc/docker/certs.d/docker-registry.default.svc:5000) TASK [openshift_node : laydown the bootstrap.yml file for on boot configuration] *** ok: [node02] TASK [openshift_node : Create a symlink to the node client CA for the docker registry] *** ok: [node02] TASK [openshift_node : Remove RPM generated config files if present] *********** skipping: [node02] => (item=master) skipping: [node02] => (item=.config_managed) TASK [openshift_node : find all files in /etc/origin/node so we can remove them] *** skipping: [node02] TASK [openshift_node : Remove everything except the resolv.conf required for node] *** skipping: [node02] TASK [openshift_node_group : create node config template] ********************** changed: [node02] TASK [openshift_node_group : remove existing node config] 
********************** changed: [node02] TASK [openshift_node_group : Ensure required directories are present] ********** ok: [node02] => (item=/etc/origin/node/pods) changed: [node02] => (item=/etc/origin/node/certificates) TASK [openshift_node_group : Update the sysconfig to group "node-config-compute"] *** changed: [node02] TASK [set_fact] **************************************************************** ok: [node02] PLAY [Re-enable excluder if it was previously enabled] ************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] PLAY [Node Preparation Checkpoint End] ***************************************** TASK [Set Node preparation 'Complete'] ***************************************** ok: [node01] PLAY [Distribute bootstrap and start nodes] ************************************ TASK [openshift_node : Gather node information] ******************************** changed: [node02] ok: [node01] TASK [openshift_node : Copy master bootstrap config locally] ******************* ok: [node02] TASK [openshift_node : Distribute bootstrap kubeconfig if one does not exist] *** ok: [node01] changed: [node02] TASK [openshift_node : Start and enable node for bootstrapping] **************** changed: [node02] changed: [node01] TASK [openshift_node : Get node logs] ****************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : debug] ************************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : fail] *************************************************** skipping: [node02] skipping: [node01] PLAY [Approve any pending CSR requests from inventory nodes] ******************* TASK [Dump all candidate bootstrap hostnames] ********************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Find all hostnames for bootstrapping] ************************************ ok: [node01] TASK [Dump the 
bootstrap hostnames] ******************************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Approve bootstrap nodes] ************************************************* changed: [node01] TASK [Get CSRs] **************************************************************** skipping: [node01] TASK [Report approval errors] ************************************************** skipping: [node01] PLAY [Ensure any inventory labels are applied to the nodes] ******************** TASK [Gathering Facts] ********************************************************* ok: [node02] ok: [node01] TASK [openshift_manage_node : Wait for master API to become available before proceeding] *** skipping: [node02] TASK [openshift_manage_node : Wait for Node Registration] ********************** ok: [node01 -> node01] ok: [node02 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02, node01 TASK [openshift_manage_node : Set node schedulability] ************************* ok: [node02 -> node01] ok: [node01 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/set_default_node_role.yml for node02, node01 TASK [openshift_manage_node : Retrieve nodes that are marked with the infra selector or the legacy infra selector] *** ok: [node02 -> node01] TASK [openshift_manage_node : Label infra or legacy infra nodes with the new role label] *** TASK [openshift_manage_node : Retrieve non-infra, non-master nodes that are not yet labeled compute] *** ok: [node02 -> node01] TASK [openshift_manage_node : label non-master non-infra nodes compute] ******** TASK [openshift_manage_node : Label all-in-one master as a compute node] ******* skipping: [node02] PLAY RECAP ********************************************************************* localhost : ok=30 changed=0 unreachable=0 failed=0 node01 : ok=71 changed=3 unreachable=0 failed=0 node02 : ok=155 changed=33 unreachable=0 failed=0 INSTALLER STATUS *************************************************************** Initialization : Complete (0:03:58) Node Preparation : Complete (0:04:20) Sending file modes: C0755 110489328 oc Sending file modes: C0600 5649 admin.kubeconfig Cluster "node01:8443" set. Cluster "node01:8443" set. + set +e + kubectl get nodes --no-headers + cluster/kubectl.sh get nodes --no-headers node01 Ready compute,infra,master 10d v1.10.0+b81c8f8 node02 Ready compute 55s v1.10.0+b81c8f8 + kubectl_rc=0 + '[' 0 -ne 0 ']' ++ kubectl get nodes --no-headers ++ grep NotReady ++ cluster/kubectl.sh get nodes --no-headers + '[' -n '' ']' + set -e + echo 'Nodes are ready:' Nodes are ready: + kubectl get nodes + cluster/kubectl.sh get nodes NAME STATUS ROLES AGE VERSION node01 Ready compute,infra,master 10d v1.10.0+b81c8f8 node02 Ready compute 56s v1.10.0+b81c8f8 + make cluster-sync ./cluster/build.sh Building ... 
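Before the cluster-sync build output that follows, note what the readiness gate traced just above actually does: it fails the job if kubectl itself returns a non-zero exit code, and again if any node still reports NotReady. A minimal sketch of that pattern, assuming the cluster/kubectl.sh wrapper used throughout this job (an illustration, not the verbatim CI script):

# Sketch only: mirrors the readiness check traced above, not the verbatim CI script.
set +e
cluster/kubectl.sh get nodes --no-headers
kubectl_rc=$?
if [ "$kubectl_rc" -ne 0 ]; then
  echo "kubectl failed with rc=$kubectl_rc" >&2
  exit 1
fi
# Any node still listed as NotReady fails the gate.
not_ready=$(cluster/kubectl.sh get nodes --no-headers | grep NotReady)
if [ -n "$not_ready" ]; then
  echo "Nodes not ready:" >&2
  echo "$not_ready" >&2
  exit 1
fi
set -e
echo 'Nodes are ready:'
cluster/kubectl.sh get nodes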
sha256:eac86de70a4e6cb392340c5eb3c9e29aa4eee64229c68e6e8a3ba9514fb773e5 go version go1.10 linux/amd64 go version go1.10 linux/amd64 make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh sha256:eac86de70a4e6cb392340c5eb3c9e29aa4eee64229c68e6e8a3ba9514fb773e5 go version go1.10 linux/amd64 go version go1.10 linux/amd64 find: '/root/go/src/kubevirt.io/kubevirt/_out/cmd': No such file or directory Compiling tests... compiled tests.test hack/build-docker.sh build Sending build context to Docker daemon 38.81 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 401035e513d8 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> 138d4f372f95 Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> a5be079f2ad5 Step 5/8 : USER 1001 ---> Using cache ---> a8da3331f8c9 Step 6/8 : COPY virt-controller /usr/bin/virt-controller ---> Using cache ---> 47f57629b5b0 Step 7/8 : ENTRYPOINT /usr/bin/virt-controller ---> Using cache ---> d99eece5851a Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-controller" '' ---> Running in bd03c50202ec ---> 5acde793ee4a Removing intermediate container bd03c50202ec Successfully built 5acde793ee4a Sending build context to Docker daemon 41.02 MB Step 1/10 : FROM kubevirt/libvirt:4.2.0 ---> 5f0bfe81a3e0 Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dc5562afdf06 Step 3/10 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool net-tools sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> 67916fb6391a Step 4/10 : COPY virt-launcher /usr/bin/virt-launcher ---> Using cache ---> 2d61dfbcc656 Step 5/10 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> Using cache ---> 78b3c807b9df Step 6/10 : RUN setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64 ---> Using cache ---> 0f154c8fbf26 Step 7/10 : RUN mkdir -p /usr/share/kubevirt/virt-launcher ---> Using cache ---> d652277fe07d Step 8/10 : COPY entrypoint.sh libvirtd.sh sock-connector /usr/share/kubevirt/virt-launcher/ ---> Using cache ---> c7af080c3331 Step 9/10 : ENTRYPOINT /usr/share/kubevirt/virt-launcher/entrypoint.sh ---> Using cache ---> 0b9e21c17420 Step 10/10 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-launcher" '' ---> Running in ca508369fa0c ---> 8088031a2823 Removing intermediate container ca508369fa0c Successfully built 8088031a2823 Sending build context to Docker daemon 40.1 MB Step 1/5 : FROM fedora:28 ---> cc510acfcd70 Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 401035e513d8 Step 3/5 : COPY virt-handler /usr/bin/virt-handler ---> Using cache ---> b30ce6dbb9c7 Step 4/5 : ENTRYPOINT /usr/bin/virt-handler ---> Using cache ---> 9363f79ecca8 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-handler" '' ---> Running in af47fcf42d21 ---> bf03ff5a4f5c Removing intermediate container af47fcf42d21 Successfully built bf03ff5a4f5c Sending build context to Docker daemon 37.02 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 401035e513d8 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api ---> Using cache ---> 425f1b8d360c Step 4/8 : WORKDIR 
/home/virt-api ---> Using cache ---> 109325fd6af7 Step 5/8 : USER 1001 ---> Using cache ---> e638e9684a2f Step 6/8 : COPY virt-api /usr/bin/virt-api ---> Using cache ---> 82825a629635 Step 7/8 : ENTRYPOINT /usr/bin/virt-api ---> Using cache ---> ae6eeb819789 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "virt-api" '' ---> Running in 17ee0208228f ---> e92e8b129a81 Removing intermediate container 17ee0208228f Successfully built e92e8b129a81 Sending build context to Docker daemon 4.096 kB Step 1/7 : FROM fedora:28 ---> cc510acfcd70 Step 2/7 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 401035e513d8 Step 3/7 : ENV container docker ---> Using cache ---> c41fed4a1333 Step 4/7 : RUN mkdir -p /images/custom /images/alpine && truncate -s 64M /images/custom/disk.img && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/alpine/disk.img ---> Using cache ---> 940d88594d2e Step 5/7 : ADD entrypoint.sh / ---> Using cache ---> 923b84390ce2 Step 6/7 : CMD /entrypoint.sh ---> Using cache ---> e9ddd62d459f Step 7/7 : LABEL "disks-images-provider" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Running in f37f33a966f1 ---> fd366ac1693d Removing intermediate container f37f33a966f1 Successfully built fd366ac1693d Sending build context to Docker daemon 2.56 kB Step 1/5 : FROM fedora:28 ---> cc510acfcd70 Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 401035e513d8 Step 3/5 : ENV container docker ---> Using cache ---> c41fed4a1333 Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> 944f01c7c457 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "vm-killer" '' ---> Running in 3caf2f47305b ---> 049843b6df91 Removing intermediate container 3caf2f47305b Successfully built 049843b6df91 Sending build context to Docker daemon 5.12 kB Step 1/7 : FROM debian:sid ---> 68f33cf86aab Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> 760b7aedd755 Step 3/7 : ENV container docker ---> Using cache ---> 242765a70aa0 Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> b671cb63e24f Step 5/7 : ADD entry-point.sh / ---> Using cache ---> 96395ae20289 Step 6/7 : CMD /entry-point.sh ---> Using cache ---> 281b61469fe1 Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "registry-disk-v1alpha" '' ---> Running in 560ee4ccf207 ---> b7b2e0f9805c Removing intermediate container 560ee4ccf207 Successfully built b7b2e0f9805c Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32776/kubevirt/registry-disk-v1alpha:devel ---> b7b2e0f9805c Step 2/4 : MAINTAINER "David Vossel" \ ---> Running in 4cf5b5e42a90 ---> 14dc6b7fc9f9 Removing intermediate container 4cf5b5e42a90 Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Running in 66630f97e5a4 100 12.1M 100 12.1M 0 0 8418k 0 0:00:01 0:00:01 --:--:-- 8418k ---> 4534158c32e1 Removing intermediate container 66630f97e5a4 Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Running in 9e8685346365 ---> c86801f4a3cd Removing intermediate container 9e8685346365 
Successfully built c86801f4a3cd Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32776/kubevirt/registry-disk-v1alpha:devel ---> b7b2e0f9805c Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Running in 7e033f856b56 ---> 1f2867ab69ec Removing intermediate container 7e033f856b56 Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2 ---> Running in 14c6ca2951fc 100 221M 100 221M 0 0 11.6M 0 0:00:18 0:00:18 --:--:-- 8321k ---> 531d5b9750cf Removing intermediate container 14c6ca2951fc Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Running in e18523e558e0 ---> bf9d7a11ce09 Removing intermediate container e18523e558e0 Successfully built bf9d7a11ce09 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32776/kubevirt/registry-disk-v1alpha:devel ---> b7b2e0f9805c Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 1f2867ab69ec Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso ---> Running in ca65a9154a36 100 37.0M 100 37.0M 0 0 11.7M 0 0:00:03 0:00:03 --:--:-- 11.7M ---> b4074fac3fa1 Removing intermediate container ca65a9154a36 Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Running in f46ef61f218e ---> 01cba826abd3 Removing intermediate container f46ef61f218e Successfully built 01cba826abd3 Sending build context to Docker daemon 34.04 MB Step 1/8 : FROM fedora:28 ---> cc510acfcd70 Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 401035e513d8 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl ---> Using cache ---> 8ded2e37f9da Step 4/8 : WORKDIR /home/virtctl ---> Using cache ---> 2baf0c61c4e7 Step 5/8 : USER 1001 
---> Using cache ---> cddb35bbdd8e Step 6/8 : COPY subresource-access-test /subresource-access-test ---> Using cache ---> 98dfa4d0c899 Step 7/8 : ENTRYPOINT /subresource-access-test ---> Using cache ---> fd843cbb1efd Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "subresource-access-test" '' ---> Running in 36132aeb33fc ---> 461960d02968 Removing intermediate container 36132aeb33fc Successfully built 461960d02968 Sending build context to Docker daemon 3.072 kB Step 1/9 : FROM fedora:28 ---> cc510acfcd70 Step 2/9 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 401035e513d8 Step 3/9 : ENV container docker ---> Using cache ---> c41fed4a1333 Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all ---> Running in 904cc9ba2d3a Fedora 28 - x86_64 - Updates 543 kB/s | 19 MB 00:35 Fedora 28 - x86_64 1.7 MB/s | 60 MB 00:35 Last metadata expiration check: 0:00:09 ago on Wed Jul 18 11:59:01 2018. Dependencies resolved. ================================================================================ Package Arch Version Repository Size ================================================================================ Installing: gcc x86_64 8.1.1-1.fc28 updates 22 M git x86_64 2.17.1-3.fc28 updates 221 k make x86_64 1:4.2.1-6.fc28 fedora 497 k Upgrading: libgcc x86_64 8.1.1-1.fc28 updates 89 k Installing dependencies: binutils x86_64 2.29.1-23.fc28 updates 6.0 M cpp x86_64 8.1.1-1.fc28 updates 9.8 M emacs-filesystem noarch 1:26.1-3.fc28 updates 68 k fipscheck x86_64 1.5.0-4.fc28 fedora 26 k fipscheck-lib x86_64 1.5.0-4.fc28 fedora 14 k gc x86_64 7.6.4-3.fc28 fedora 108 k git-core x86_64 2.17.1-3.fc28 updates 4.0 M git-core-doc noarch 2.17.1-3.fc28 updates 2.3 M glibc-devel x86_64 2.27-8.fc28 fedora 1.0 M glibc-headers x86_64 2.27-8.fc28 fedora 454 k groff-base x86_64 1.22.3-15.fc28 fedora 1.0 M guile x86_64 5:2.0.14-7.fc28 fedora 3.5 M isl x86_64 0.16.1-6.fc28 fedora 841 k kernel-headers x86_64 4.17.6-200.fc28 updates 1.2 M less x86_64 530-1.fc28 fedora 163 k libatomic_ops x86_64 7.6.2-3.fc28 fedora 37 k libedit x86_64 3.1-23.20170329cvs.fc28 fedora 101 k libgomp x86_64 8.1.1-1.fc28 updates 198 k libmpc x86_64 1.0.2-9.fc28 fedora 58 k libpkgconf x86_64 1.4.2-1.fc28 fedora 34 k libsecret x86_64 0.18.6-1.fc28 fedora 162 k libstdc++ x86_64 8.1.1-1.fc28 updates 468 k libtool-ltdl x86_64 2.4.6-24.fc28 updates 57 k libxcrypt-devel x86_64 4.0.0-5.fc28 fedora 15 k ncurses x86_64 6.1-4.20180224.fc28 fedora 377 k openssh x86_64 7.7p1-5.fc28 updates 483 k openssh-clients x86_64 7.7p1-5.fc28 updates 684 k perl-Carp noarch 1.42-396.fc28 updates 29 k perl-Data-Dumper x86_64 2.167-399.fc28 fedora 57 k perl-Digest noarch 1.17-395.fc28 fedora 26 k perl-Digest-MD5 x86_64 2.55-396.fc28 fedora 36 k perl-Encode x86_64 4:2.97-3.fc28 fedora 1.5 M perl-Errno x86_64 1.28-412.fc28 updates 74 k perl-Error noarch 1:0.17025-2.fc28 fedora 45 k perl-Exporter noarch 5.72-396.fc28 fedora 33 k perl-File-Path noarch 2.15-2.fc28 fedora 37 k perl-File-Temp noarch 0.230.600-1.fc28 updates 62 k perl-Getopt-Long noarch 1:2.50-4.fc28 fedora 62 k perl-Git noarch 2.17.1-3.fc28 updates 73 k perl-HTTP-Tiny noarch 0.070-395.fc28 fedora 56 k perl-IO x86_64 1.38-412.fc28 updates 138 k perl-IO-Socket-IP noarch 0.39-5.fc28 fedora 46 k perl-MIME-Base64 x86_64 3.15-396.fc28 fedora 29 k perl-Net-SSLeay x86_64 1.85-1.fc28 fedora 356 k perl-PathTools x86_64 3.74-1.fc28 fedora 89 k perl-Pod-Escapes noarch 1:1.07-395.fc28 fedora 19 k perl-Pod-Perldoc noarch 3.28-396.fc28 fedora 87 k perl-Pod-Simple noarch 
1:3.35-395.fc28 fedora 212 k perl-Pod-Usage noarch 4:1.69-395.fc28 fedora 33 k perl-Scalar-List-Utils x86_64 3:1.49-2.fc28 fedora 67 k perl-Socket x86_64 4:2.027-2.fc28 fedora 58 k perl-Storable x86_64 1:3.11-2.fc28 updates 97 k perl-Term-ANSIColor noarch 4.06-396.fc28 fedora 45 k perl-Term-Cap noarch 1.17-395.fc28 fedora 22 k perl-TermReadKey x86_64 2.37-7.fc28 fedora 39 k perl-Text-ParseWords noarch 3.30-395.fc28 fedora 17 k perl-Text-Tabs+Wrap noarch 2013.0523-395.fc28 fedora 23 k perl-Time-Local noarch 1:1.280-1.fc28 updates 32 k perl-URI noarch 1.73-2.fc28 fedora 115 k perl-Unicode-Normalize x86_64 1.25-396.fc28 fedora 81 k perl-constant noarch 1.33-396.fc28 fedora 24 k perl-interpreter x86_64 4:5.26.2-412.fc28 updates 6.2 M perl-libnet noarch 3.11-3.fc28 fedora 120 k perl-libs x86_64 4:5.26.2-412.fc28 updates 1.5 M perl-macros x86_64 4:5.26.2-412.fc28 updates 70 k perl-parent noarch 1:0.236-395.fc28 fedora 19 k perl-podlators noarch 4.11-1.fc28 updates 117 k perl-threads x86_64 1:2.21-2.fc28 fedora 60 k perl-threads-shared x86_64 1.58-2.fc28 fedora 46 k pkgconf x86_64 1.4.2-1.fc28 fedora 37 k pkgconf-m4 noarch 1.4.2-1.fc28 fedora 16 k pkgconf-pkg-config x86_64 1.4.2-1.fc28 fedora 14 k Installing weak dependencies: perl-IO-Socket-SSL noarch 2.056-1.fc28 fedora 285 k perl-Mozilla-CA noarch 20160104-7.fc28 fedora 14 k Transaction Summary ================================================================================ Install 77 Packages Upgrade 1 Package Total download size: 68 M Downloading Packages: (1/78): gc-7.6.4-3.fc28.x86_64.rpm 219 kB/s | 108 kB 00:00 (2/78): libatomic_ops-7.6.2-3.fc28.x86_64.rpm 210 kB/s | 37 kB 00:00 (3/78): make-4.2.1-6.fc28.x86_64.rpm 629 kB/s | 497 kB 00:00 (4/78): git-2.17.1-3.fc28.x86_64.rpm 317 kB/s | 221 kB 00:00 (5/78): guile-2.0.14-7.fc28.x86_64.rpm 1.7 MB/s | 3.5 MB 00:02 (6/78): git-core-doc-2.17.1-3.fc28.noarch.rpm 2.2 MB/s | 2.3 MB 00:01 (7/78): perl-Git-2.17.1-3.fc28.noarch.rpm 198 kB/s | 73 kB 00:00 (8/78): perl-Getopt-Long-2.50-4.fc28.noarch.rpm 201 kB/s | 62 kB 00:00 (9/78): git-core-2.17.1-3.fc28.x86_64.rpm 1.9 MB/s | 4.0 MB 00:02 (10/78): libsecret-0.18.6-1.fc28.x86_64.rpm 298 kB/s | 162 kB 00:00 (11/78): perl-TermReadKey-2.37-7.fc28.x86_64.rp 274 kB/s | 39 kB 00:00 (12/78): perl-PathTools-3.74-1.fc28.x86_64.rpm 238 kB/s | 89 kB 00:00 (13/78): perl-Error-0.17025-2.fc28.noarch.rpm 259 kB/s | 45 kB 00:00 (14/78): less-530-1.fc28.x86_64.rpm 617 kB/s | 163 kB 00:00 (15/78): perl-Exporter-5.72-396.fc28.noarch.rpm 266 kB/s | 33 kB 00:00 (16/78): perl-Text-ParseWords-3.30-395.fc28.noa 217 kB/s | 17 kB 00:00 (17/78): perl-constant-1.33-396.fc28.noarch.rpm 218 kB/s | 24 kB 00:00 (18/78): perl-Pod-Usage-1.69-395.fc28.noarch.rp 275 kB/s | 33 kB 00:00 (19/78): perl-Scalar-List-Utils-1.49-2.fc28.x86 386 kB/s | 67 kB 00:00 (20/78): perl-Pod-Perldoc-3.28-396.fc28.noarch. 404 kB/s | 87 kB 00:00 (21/78): perl-HTTP-Tiny-0.070-395.fc28.noarch.r 271 kB/s | 56 kB 00:00 (22/78): perl-parent-0.236-395.fc28.noarch.rpm 190 kB/s | 19 kB 00:00 (23/78): perl-MIME-Base64-3.15-396.fc28.x86_64. 258 kB/s | 29 kB 00:00 (24/78): perl-Pod-Simple-3.35-395.fc28.noarch.r 524 kB/s | 212 kB 00:00 (25/78): perl-Pod-Escapes-1.07-395.fc28.noarch. 
254 kB/s | 19 kB 00:00 (26/78): perl-Socket-2.027-2.fc28.x86_64.rpm 419 kB/s | 58 kB 00:00 (27/78): perl-Text-Tabs+Wrap-2013.0523-395.fc28 261 kB/s | 23 kB 00:00 (28/78): groff-base-1.22.3-15.fc28.x86_64.rpm 1.1 MB/s | 1.0 MB 00:00 (29/78): isl-0.16.1-6.fc28.x86_64.rpm 1.4 MB/s | 841 kB 00:00 (30/78): libmpc-1.0.2-9.fc28.x86_64.rpm 668 kB/s | 58 kB 00:00 (31/78): cpp-8.1.1-1.fc28.x86_64.rpm 3.1 MB/s | 9.8 MB 00:03 (32/78): perl-interpreter-5.26.2-412.fc28.x86_6 2.2 MB/s | 6.2 MB 00:02 (33/78): perl-libs-5.26.2-412.fc28.x86_64.rpm 2.9 MB/s | 1.5 MB 00:00 (34/78): perl-File-Path-2.15-2.fc28.noarch.rpm 167 kB/s | 37 kB 00:00 (35/78): perl-Unicode-Normalize-1.25-396.fc28.x 273 kB/s | 81 kB 00:00 (36/78): perl-threads-2.21-2.fc28.x86_64.rpm 210 kB/s | 60 kB 00:00 (37/78): perl-threads-shared-1.58-2.fc28.x86_64 221 kB/s | 46 kB 00:00 (38/78): perl-Errno-1.28-412.fc28.x86_64.rpm 804 kB/s | 74 kB 00:00 (39/78): perl-Carp-1.42-396.fc28.noarch.rpm 248 kB/s | 29 kB 00:00 (40/78): perl-podlators-4.11-1.fc28.noarch.rpm 1.2 MB/s | 117 kB 00:00 (41/78): perl-Term-Cap-1.17-395.fc28.noarch.rpm 263 kB/s | 22 kB 00:00 (42/78): perl-Term-ANSIColor-4.06-396.fc28.noar 248 kB/s | 45 kB 00:00 (43/78): ncurses-6.1-4.20180224.fc28.x86_64.rpm 591 kB/s | 377 kB 00:00 (44/78): perl-File-Temp-0.230.600-1.fc28.noarch 350 kB/s | 62 kB 00:00 (45/78): perl-IO-1.38-412.fc28.x86_64.rpm 674 kB/s | 138 kB 00:00 (46/78): perl-Time-Local-1.280-1.fc28.noarch.rp 328 kB/s | 32 kB 00:00 (47/78): perl-Storable-3.11-2.fc28.x86_64.rpm 547 kB/s | 97 kB 00:00 (48/78): libtool-ltdl-2.4.6-24.fc28.x86_64.rpm 625 kB/s | 57 kB 00:00 (49/78): libstdc++-8.1.1-1.fc28.x86_64.rpm 1.6 MB/s | 468 kB 00:00 (50/78): perl-macros-5.26.2-412.fc28.x86_64.rpm 577 kB/s | 70 kB 00:00 (51/78): openssh-clients-7.7p1-5.fc28.x86_64.rp 1.5 MB/s | 684 kB 00:00 (52/78): perl-Encode-2.97-3.fc28.x86_64.rpm 650 kB/s | 1.5 MB 00:02 (53/78): fipscheck-lib-1.5.0-4.fc28.x86_64.rpm 188 kB/s | 14 kB 00:00 (54/78): openssh-7.7p1-5.fc28.x86_64.rpm 1.7 MB/s | 483 kB 00:00 (55/78): fipscheck-1.5.0-4.fc28.x86_64.rpm 186 kB/s | 26 kB 00:00 (56/78): libedit-3.1-23.20170329cvs.fc28.x86_64 490 kB/s | 101 kB 00:00 (57/78): emacs-filesystem-26.1-3.fc28.noarch.rp 744 kB/s | 68 kB 00:00 (58/78): glibc-devel-2.27-8.fc28.x86_64.rpm 624 kB/s | 1.0 MB 00:01 (59/78): libxcrypt-devel-4.0.0-5.fc28.x86_64.rp 189 kB/s | 15 kB 00:00 (60/78): glibc-headers-2.27-8.fc28.x86_64.rpm 535 kB/s | 454 kB 00:00 (61/78): libgomp-8.1.1-1.fc28.x86_64.rpm 443 kB/s | 198 kB 00:00 (62/78): binutils-2.29.1-23.fc28.x86_64.rpm 2.0 MB/s | 6.0 MB 00:03 (63/78): gcc-8.1.1-1.fc28.x86_64.rpm 2.1 MB/s | 22 MB 00:10 (64/78): pkgconf-pkg-config-1.4.2-1.fc28.x86_64 46 kB/s | 14 kB 00:00 (65/78): pkgconf-1.4.2-1.fc28.x86_64.rpm 146 kB/s | 37 kB 00:00 [MIRROR] kernel-headers-4.17.6-200.fc28.x86_64.rpm: Status code: 404 for http://mirror.chpc.utah.edu/pub/fedora/linux/updates/28/Everything/x86_64/Packages/k/kernel-headers-4.17.6-200.fc28.x86_64.rpm (66/78): pkgconf-m4-1.4.2-1.fc28.noarch.rpm 89 kB/s | 16 kB 00:00 (67/78): libpkgconf-1.4.2-1.fc28.x86_64.rpm 187 kB/s | 34 kB 00:00 (68/78): perl-Mozilla-CA-20160104-7.fc28.noarch 123 kB/s | 14 kB 00:00 (69/78): perl-IO-Socket-IP-0.39-5.fc28.noarch.r 234 kB/s | 46 kB 00:00 [MIRROR] kernel-headers-4.17.6-200.fc28.x86_64.rpm: Status code: 404 for https://mirror.chpc.utah.edu/pub/fedora/linux/updates/28/Everything/x86_64/Packages/k/kernel-headers-4.17.6-200.fc28.x86_64.rpm (70/78): perl-IO-Socket-SSL-2.056-1.fc28.noarch 466 kB/s | 285 kB 00:00 (71/78): perl-URI-1.73-2.fc28.noarch.rpm 
501 kB/s | 115 kB 00:00 (72/78): perl-Data-Dumper-2.167-399.fc28.x86_64 330 kB/s | 57 kB 00:00 (73/78): perl-Net-SSLeay-1.85-1.fc28.x86_64.rpm 466 kB/s | 356 kB 00:00 (74/78): perl-libnet-3.11-3.fc28.noarch.rpm 982 kB/s | 120 kB 00:00 (75/78): perl-Digest-MD5-2.55-396.fc28.x86_64.r 317 kB/s | 36 kB 00:00 (76/78): perl-Digest-1.17-395.fc28.noarch.rpm 241 kB/s | 26 kB 00:00 (77/78): libgcc-8.1.1-1.fc28.x86_64.rpm 308 kB/s | 89 kB 00:00 (78/78): kernel-headers-4.17.6-200.fc28.x86_64. 680 kB/s | 1.2 MB 00:01 -------------------------------------------------------------------------------- Total 4.0 MB/s | 68 MB 00:17 Running transaction check Transaction check succeeded. Running transaction test Transaction test succeeded. Running transaction Preparing : 1/1 Installing : perl-libs-4:5.26.2-412.fc28.x86_64 1/79 Installing : perl-Carp-1.42-396.fc28.noarch 2/79 Installing : perl-Exporter-5.72-396.fc28.noarch 3/79 Installing : perl-Scalar-List-Utils-3:1.49-2.fc28.x86_64 4/79 Upgrading : libgcc-8.1.1-1.fc28.x86_64 5/79 Running scriptlet: libgcc-8.1.1-1.fc28.x86_64 5/79 Installing : fipscheck-1.5.0-4.fc28.x86_64 6/79 Installing : fipscheck-lib-1.5.0-4.fc28.x86_64 7/79 Running scriptlet: fipscheck-lib-1.5.0-4.fc28.x86_64 7/79 Installing : libstdc++-8.1.1-1.fc28.x86_64 8/79 Running scriptlet: libstdc++-8.1.1-1.fc28.x86_64 8/79 Installing : perl-Text-ParseWords-3.30-395.fc28.noarch 9/79 Installing : libmpc-1.0.2-9.fc28.x86_64 10/79 Running scriptlet: libmpc-1.0.2-9.fc28.x86_64 10/79 Installing : cpp-8.1.1-1.fc28.x86_64 11/79 Running scriptlet: cpp-8.1.1-1.fc28.x86_64 11/79 Installing : groff-base-1.22.3-15.fc28.x86_64 12/79 Running scriptlet: openssh-7.7p1-5.fc28.x86_64 13/79 Installing : openssh-7.7p1-5.fc28.x86_64 13/79 Installing : perl-Term-ANSIColor-4.06-396.fc28.noarch 14/79 Installing : perl-macros-4:5.26.2-412.fc28.x86_64 15/79 Installing : perl-constant-1.33-396.fc28.noarch 16/79 Installing : perl-parent-1:0.236-395.fc28.noarch 17/79 Installing : perl-Socket-4:2.027-2.fc28.x86_64 18/79 Installing : perl-Text-Tabs+Wrap-2013.0523-395.fc28.noarch 19/79 Installing : perl-File-Path-2.15-2.fc28.noarch 20/79 Installing : perl-Unicode-Normalize-1.25-396.fc28.x86_64 21/79 Installing : perl-threads-shared-1.58-2.fc28.x86_64 22/79 Installing : perl-threads-1:2.21-2.fc28.x86_64 23/79 Installing : perl-Errno-1.28-412.fc28.x86_64 24/79 Installing : perl-PathTools-3.74-1.fc28.x86_64 25/79 Installing : perl-interpreter-4:5.26.2-412.fc28.x86_64 26/79 Installing : perl-IO-1.38-412.fc28.x86_64 27/79 Installing : perl-MIME-Base64-3.15-396.fc28.x86_64 28/79 Installing : perl-Time-Local-1:1.280-1.fc28.noarch 29/79 Installing : perl-HTTP-Tiny-0.070-395.fc28.noarch 30/79 Installing : perl-File-Temp-0.230.600-1.fc28.noarch 31/79 Installing : perl-IO-Socket-IP-0.39-5.fc28.noarch 32/79 Installing : perl-Net-SSLeay-1.85-1.fc28.x86_64 33/79 Installing : perl-Digest-1.17-395.fc28.noarch 34/79 Installing : perl-Digest-MD5-2.55-396.fc28.x86_64 35/79 Installing : perl-libnet-3.11-3.fc28.noarch 36/79 Installing : perl-Storable-1:3.11-2.fc28.x86_64 37/79 Installing : perl-TermReadKey-2.37-7.fc28.x86_64 38/79 Installing : perl-Error-1:0.17025-2.fc28.noarch 39/79 Installing : perl-Pod-Escapes-1:1.07-395.fc28.noarch 40/79 Installing : perl-Data-Dumper-2.167-399.fc28.x86_64 41/79 Installing : kernel-headers-4.17.6-200.fc28.x86_64 42/79 Running scriptlet: glibc-headers-2.27-8.fc28.x86_64 43/79 Installing : glibc-headers-2.27-8.fc28.x86_64 43/79 Installing : libpkgconf-1.4.2-1.fc28.x86_64 44/79 Installing : pkgconf-1.4.2-1.fc28.x86_64 
45/79 Installing : pkgconf-m4-1.4.2-1.fc28.noarch 46/79 Installing : pkgconf-pkg-config-1.4.2-1.fc28.x86_64 47/79 Installing : libxcrypt-devel-4.0.0-5.fc28.x86_64 48/79 Installing : glibc-devel-2.27-8.fc28.x86_64 49/79 Running scriptlet: glibc-devel-2.27-8.fc28.x86_64 49/79 Installing : libgomp-8.1.1-1.fc28.x86_64 50/79 Running scriptlet: libgomp-8.1.1-1.fc28.x86_64 50/79 Installing : binutils-2.29.1-23.fc28.x86_64 51/79 Running scriptlet: binutils-2.29.1-23.fc28.x86_64 51/79 install-info: No such file or directory for /usr/share/info/as.info.gz install-info: No such file or directory for /usr/share/info/binutils.info.gz install-info: No such file or directory for /usr/share/info/gprof.info.gz install-info: No such file or directory for /usr/share/info/ld.info.gz Installing : emacs-filesystem-1:26.1-3.fc28.noarch 52/79 Installing : libedit-3.1-23.20170329cvs.fc28.x86_64 53/79 Installing : openssh-clients-7.7p1-5.fc28.x86_64 54/79 Installing : libtool-ltdl-2.4.6-24.fc28.x86_64 55/79 Running scriptlet: libtool-ltdl-2.4.6-24.fc28.x86_64 55/79 Installing : ncurses-6.1-4.20180224.fc28.x86_64 56/79 Installing : perl-Term-Cap-1.17-395.fc28.noarch 57/79 Installing : perl-Pod-Simple-1:3.35-395.fc28.noarch 58/79 Installing : perl-Pod-Usage-4:1.69-395.fc28.noarch 59/79 Installing : perl-Getopt-Long-1:2.50-4.fc28.noarch 60/79 Installing : perl-Encode-4:2.97-3.fc28.x86_64 61/79 Installing : perl-podlators-4.11-1.fc28.noarch 62/79 Installing : perl-Pod-Perldoc-3.28-396.fc28.noarch 63/79 Installing : perl-URI-1.73-2.fc28.noarch 64/79 Installing : isl-0.16.1-6.fc28.x86_64 65/79 Running scriptlet: isl-0.16.1-6.fc28.x86_64 65/79 Installing : less-530-1.fc28.x86_64 66/79 Installing : git-core-2.17.1-3.fc28.x86_64 67/79 Installing : git-core-doc-2.17.1-3.fc28.noarch 68/79 Installing : libsecret-0.18.6-1.fc28.x86_64 69/79 Installing : perl-Git-2.17.1-3.fc28.noarch 70/79 Installing : git-2.17.1-3.fc28.x86_64 71/79 Installing : libatomic_ops-7.6.2-3.fc28.x86_64 72/79 Installing : gc-7.6.4-3.fc28.x86_64 73/79 Installing : guile-5:2.0.14-7.fc28.x86_64 74/79 Running scriptlet: guile-5:2.0.14-7.fc28.x86_64 74/79 Installing : make-1:4.2.1-6.fc28.x86_64 75/79 Running scriptlet: make-1:4.2.1-6.fc28.x86_64 75/79 Installing : gcc-8.1.1-1.fc28.x86_64 76/79 Running scriptlet: gcc-8.1.1-1.fc28.x86_64 76/79 Installing : perl-IO-Socket-SSL-2.056-1.fc28.noarch 77/79 Installing : perl-Mozilla-CA-20160104-7.fc28.noarch 78/79 Cleanup : libgcc-8.0.1-0.20.fc28.x86_64 79/79 Running scriptlet: libgcc-8.0.1-0.20.fc28.x86_64 79/79 Running scriptlet: guile-5:2.0.14-7.fc28.x86_64 79/79 Running scriptlet: libgcc-8.0.1-0.20.fc28.x86_64 79/79 Verifying : make-1:4.2.1-6.fc28.x86_64 1/79 Verifying : gc-7.6.4-3.fc28.x86_64 2/79 Verifying : guile-5:2.0.14-7.fc28.x86_64 3/79 Verifying : libatomic_ops-7.6.2-3.fc28.x86_64 4/79 Verifying : git-2.17.1-3.fc28.x86_64 5/79 Verifying : git-core-2.17.1-3.fc28.x86_64 6/79 Verifying : git-core-doc-2.17.1-3.fc28.noarch 7/79 Verifying : perl-Git-2.17.1-3.fc28.noarch 8/79 Verifying : libsecret-0.18.6-1.fc28.x86_64 9/79 Verifying : perl-Getopt-Long-1:2.50-4.fc28.noarch 10/79 Verifying : perl-PathTools-3.74-1.fc28.x86_64 11/79 Verifying : perl-TermReadKey-2.37-7.fc28.x86_64 12/79 Verifying : less-530-1.fc28.x86_64 13/79 Verifying : perl-Error-1:0.17025-2.fc28.noarch 14/79 Verifying : perl-Exporter-5.72-396.fc28.noarch 15/79 Verifying : perl-Pod-Usage-4:1.69-395.fc28.noarch 16/79 Verifying : perl-Text-ParseWords-3.30-395.fc28.noarch 17/79 Verifying : perl-constant-1.33-396.fc28.noarch 18/79 Verifying : 
perl-Scalar-List-Utils-3:1.49-2.fc28.x86_64 19/79 Verifying : perl-Pod-Perldoc-3.28-396.fc28.noarch 20/79 Verifying : groff-base-1.22.3-15.fc28.x86_64 21/79 Verifying : perl-HTTP-Tiny-0.070-395.fc28.noarch 22/79 Verifying : perl-Pod-Simple-1:3.35-395.fc28.noarch 23/79 Verifying : perl-parent-1:0.236-395.fc28.noarch 24/79 Verifying : perl-MIME-Base64-3.15-396.fc28.x86_64 25/79 Verifying : perl-Socket-4:2.027-2.fc28.x86_64 26/79 Verifying : perl-Pod-Escapes-1:1.07-395.fc28.noarch 27/79 Verifying : perl-Text-Tabs+Wrap-2013.0523-395.fc28.noarch 28/79 Verifying : gcc-8.1.1-1.fc28.x86_64 29/79 Verifying : cpp-8.1.1-1.fc28.x86_64 30/79 Verifying : isl-0.16.1-6.fc28.x86_64 31/79 Verifying : libmpc-1.0.2-9.fc28.x86_64 32/79 Verifying : perl-interpreter-4:5.26.2-412.fc28.x86_64 33/79 Verifying : perl-libs-4:5.26.2-412.fc28.x86_64 34/79 Verifying : perl-File-Path-2.15-2.fc28.noarch 35/79 Verifying : perl-Unicode-Normalize-1.25-396.fc28.x86_64 36/79 Verifying : perl-threads-1:2.21-2.fc28.x86_64 37/79 Verifying : perl-threads-shared-1.58-2.fc28.x86_64 38/79 Verifying : perl-Errno-1.28-412.fc28.x86_64 39/79 Verifying : perl-Carp-1.42-396.fc28.noarch 40/79 Verifying : perl-podlators-4.11-1.fc28.noarch 41/79 Verifying : perl-Term-ANSIColor-4.06-396.fc28.noarch 42/79 Verifying : perl-Term-Cap-1.17-395.fc28.noarch 43/79 Verifying : ncurses-6.1-4.20180224.fc28.x86_64 44/79 Verifying : perl-Encode-4:2.97-3.fc28.x86_64 45/79 Verifying : perl-File-Temp-0.230.600-1.fc28.noarch 46/79 Verifying : perl-IO-1.38-412.fc28.x86_64 47/79 Verifying : perl-Time-Local-1:1.280-1.fc28.noarch 48/79 Verifying : perl-Storable-1:3.11-2.fc28.x86_64 49/79 Verifying : libtool-ltdl-2.4.6-24.fc28.x86_64 50/79 Verifying : libstdc++-8.1.1-1.fc28.x86_64 51/79 Verifying : perl-macros-4:5.26.2-412.fc28.x86_64 52/79 Verifying : openssh-clients-7.7p1-5.fc28.x86_64 53/79 Verifying : openssh-7.7p1-5.fc28.x86_64 54/79 Verifying : fipscheck-lib-1.5.0-4.fc28.x86_64 55/79 Verifying : libedit-3.1-23.20170329cvs.fc28.x86_64 56/79 Verifying : fipscheck-1.5.0-4.fc28.x86_64 57/79 Verifying : emacs-filesystem-1:26.1-3.fc28.noarch 58/79 Verifying : binutils-2.29.1-23.fc28.x86_64 59/79 Verifying : glibc-devel-2.27-8.fc28.x86_64 60/79 Verifying : libxcrypt-devel-4.0.0-5.fc28.x86_64 61/79 Verifying : glibc-headers-2.27-8.fc28.x86_64 62/79 Verifying : libgomp-8.1.1-1.fc28.x86_64 63/79 Verifying : pkgconf-pkg-config-1.4.2-1.fc28.x86_64 64/79 Verifying : pkgconf-1.4.2-1.fc28.x86_64 65/79 Verifying : pkgconf-m4-1.4.2-1.fc28.noarch 66/79 Verifying : libpkgconf-1.4.2-1.fc28.x86_64 67/79 Verifying : kernel-headers-4.17.6-200.fc28.x86_64 68/79 Verifying : perl-Mozilla-CA-20160104-7.fc28.noarch 69/79 Verifying : perl-IO-Socket-SSL-2.056-1.fc28.noarch 70/79 Verifying : perl-IO-Socket-IP-0.39-5.fc28.noarch 71/79 Verifying : perl-Net-SSLeay-1.85-1.fc28.x86_64 72/79 Verifying : perl-URI-1.73-2.fc28.noarch 73/79 Verifying : perl-Data-Dumper-2.167-399.fc28.x86_64 74/79 Verifying : perl-libnet-3.11-3.fc28.noarch 75/79 Verifying : perl-Digest-MD5-2.55-396.fc28.x86_64 76/79 Verifying : perl-Digest-1.17-395.fc28.noarch 77/79 Verifying : libgcc-8.1.1-1.fc28.x86_64 78/79 Verifying : libgcc-8.0.1-0.20.fc28.x86_64 79/79 Installed: gcc.x86_64 8.1.1-1.fc28 git.x86_64 2.17.1-3.fc28 make.x86_64 1:4.2.1-6.fc28 perl-IO-Socket-SSL.noarch 2.056-1.fc28 perl-Mozilla-CA.noarch 20160104-7.fc28 binutils.x86_64 2.29.1-23.fc28 cpp.x86_64 8.1.1-1.fc28 emacs-filesystem.noarch 1:26.1-3.fc28 fipscheck.x86_64 1.5.0-4.fc28 fipscheck-lib.x86_64 1.5.0-4.fc28 gc.x86_64 7.6.4-3.fc28 git-core.x86_64 
2.17.1-3.fc28 git-core-doc.noarch 2.17.1-3.fc28 glibc-devel.x86_64 2.27-8.fc28 glibc-headers.x86_64 2.27-8.fc28 groff-base.x86_64 1.22.3-15.fc28 guile.x86_64 5:2.0.14-7.fc28 isl.x86_64 0.16.1-6.fc28 kernel-headers.x86_64 4.17.6-200.fc28 less.x86_64 530-1.fc28 libatomic_ops.x86_64 7.6.2-3.fc28 libedit.x86_64 3.1-23.20170329cvs.fc28 libgomp.x86_64 8.1.1-1.fc28 libmpc.x86_64 1.0.2-9.fc28 libpkgconf.x86_64 1.4.2-1.fc28 libsecret.x86_64 0.18.6-1.fc28 libstdc++.x86_64 8.1.1-1.fc28 libtool-ltdl.x86_64 2.4.6-24.fc28 libxcrypt-devel.x86_64 4.0.0-5.fc28 ncurses.x86_64 6.1-4.20180224.fc28 openssh.x86_64 7.7p1-5.fc28 openssh-clients.x86_64 7.7p1-5.fc28 perl-Carp.noarch 1.42-396.fc28 perl-Data-Dumper.x86_64 2.167-399.fc28 perl-Digest.noarch 1.17-395.fc28 perl-Digest-MD5.x86_64 2.55-396.fc28 perl-Encode.x86_64 4:2.97-3.fc28 perl-Errno.x86_64 1.28-412.fc28 perl-Error.noarch 1:0.17025-2.fc28 perl-Exporter.noarch 5.72-396.fc28 perl-File-Path.noarch 2.15-2.fc28 perl-File-Temp.noarch 0.230.600-1.fc28 perl-Getopt-Long.noarch 1:2.50-4.fc28 perl-Git.noarch 2.17.1-3.fc28 perl-HTTP-Tiny.noarch 0.070-395.fc28 perl-IO.x86_64 1.38-412.fc28 perl-IO-Socket-IP.noarch 0.39-5.fc28 perl-MIME-Base64.x86_64 3.15-396.fc28 perl-Net-SSLeay.x86_64 1.85-1.fc28 perl-PathTools.x86_64 3.74-1.fc28 perl-Pod-Escapes.noarch 1:1.07-395.fc28 perl-Pod-Perldoc.noarch 3.28-396.fc28 perl-Pod-Simple.noarch 1:3.35-395.fc28 perl-Pod-Usage.noarch 4:1.69-395.fc28 perl-Scalar-List-Utils.x86_64 3:1.49-2.fc28 perl-Socket.x86_64 4:2.027-2.fc28 perl-Storable.x86_64 1:3.11-2.fc28 perl-Term-ANSIColor.noarch 4.06-396.fc28 perl-Term-Cap.noarch 1.17-395.fc28 perl-TermReadKey.x86_64 2.37-7.fc28 perl-Text-ParseWords.noarch 3.30-395.fc28 perl-Text-Tabs+Wrap.noarch 2013.0523-395.fc28 perl-Time-Local.noarch 1:1.280-1.fc28 perl-URI.noarch 1.73-2.fc28 perl-Unicode-Normalize.x86_64 1.25-396.fc28 perl-constant.noarch 1.33-396.fc28 perl-interpreter.x86_64 4:5.26.2-412.fc28 perl-libnet.noarch 3.11-3.fc28 perl-libs.x86_64 4:5.26.2-412.fc28 perl-macros.x86_64 4:5.26.2-412.fc28 perl-parent.noarch 1:0.236-395.fc28 perl-podlators.noarch 4.11-1.fc28 perl-threads.x86_64 1:2.21-2.fc28 perl-threads-shared.x86_64 1.58-2.fc28 pkgconf.x86_64 1.4.2-1.fc28 pkgconf-m4.noarch 1.4.2-1.fc28 pkgconf-pkg-config.x86_64 1.4.2-1.fc28 Upgraded: libgcc.x86_64 8.1.1-1.fc28 Complete! 
18 files removed ---> 4f9ac85fbee5 Removing intermediate container 904cc9ba2d3a Step 5/9 : ENV GIMME_GO_VERSION 1.9.2 ---> Running in 3f5d7663ad66 ---> 788ec0618eab Removing intermediate container 3f5d7663ad66 Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Running in 2b85514ac0c8  ---> cc3ff134b422 Removing intermediate container 2b85514ac0c8 Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Running in 2957c95d4321 ---> cd908bbed6a4 Removing intermediate container 2957c95d4321 Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli ---> Running in 00425bea7fba go version go1.9.2 linux/amd64  ---> 1630fb4c77d9 Removing intermediate container 00425bea7fba Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.10-release1" '' "winrmcli" '' ---> Running in b98d5eaaa7aa ---> 9bc07525f463 Removing intermediate container b98d5eaaa7aa Successfully built 9bc07525f463 Sending build context to Docker daemon 35.17 MB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> 43cfafb0eafc Step 3/5 : COPY example-hook-sidecar /example-hook-sidecar ---> Using cache ---> dddddb5dd60d Step 4/5 : ENTRYPOINT /example-hook-sidecar ---> Using cache ---> d1ebbddfb954 Step 5/5 : LABEL "example-hook-sidecar" '' "kubevirt-functional-tests-openshift-3.10-release1" '' ---> Running in 41567b7eb523 ---> 93d51dc83977 Removing intermediate container 41567b7eb523 Successfully built 93d51dc83977 hack/build-docker.sh push The push refers to a repository [localhost:32776/kubevirt/virt-controller] 08418207a0e6: Preparing 291a040d9067: Preparing 891e1e4ef82a: Preparing 291a040d9067: Pushed 08418207a0e6: Pushed 891e1e4ef82a: Pushed devel: digest: sha256:39c5f79057f5ca4dfe11cf1fe8e76fff9e36a5780d2e9d2d5dce0e89f4af8075 size: 949 The push refers to a repository [localhost:32776/kubevirt/virt-launcher] 9a5014861af6: Preparing 39441d9fd12b: Preparing 28cb7bf04f41: Preparing 9466e554a29a: Preparing 775662c88314: Preparing 03cf24bfe08c: Preparing da38cf808aa5: Preparing b83399358a92: Preparing 186d8b3e4fd8: Preparing fa6154170bf5: Preparing 5eefb9960a36: Preparing 891e1e4ef82a: Preparing b83399358a92: Waiting 186d8b3e4fd8: Waiting fa6154170bf5: Waiting 775662c88314: Waiting 03cf24bfe08c: Waiting da38cf808aa5: Waiting 5eefb9960a36: Waiting 891e1e4ef82a: Waiting 9466e554a29a: Pushed 39441d9fd12b: Pushed 9a5014861af6: Pushed da38cf808aa5: Pushed b83399358a92: Pushed 186d8b3e4fd8: Pushed fa6154170bf5: Pushed 28cb7bf04f41: Pushed 891e1e4ef82a: Mounted from kubevirt/virt-controller 03cf24bfe08c: Pushed 775662c88314: Pushed 5eefb9960a36: Pushed devel: digest: sha256:d6a578ea40cf6108dc4f4e68f00bfcb309b9aac5112b3a873226bfc720e9987e size: 2828 The push refers to a repository [localhost:32776/kubevirt/virt-handler] 90631de6634e: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-launcher 90631de6634e: Pushed devel: digest: sha256:1f3dd5d314b09e09908753e3cdab95ca29e77a3048096562e9b987fdba6cb75c size: 741 The push refers to a repository [localhost:32776/kubevirt/virt-api] 1071ec546674: Preparing c1418c9009fc: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-handler c1418c9009fc: Pushed 1071ec546674: Pushed devel: digest: sha256:f36b0c93c252817402150ce4dcc1fc82e88aa40245914bb5938387047ad6d453 size: 948 The push refers to a repository [localhost:32776/kubevirt/disks-images-provider] 
080f4f9db6ce: Preparing 7270498e55cc: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/virt-api 080f4f9db6ce: Pushed 7270498e55cc: Pushed devel: digest: sha256:8709cdeccb875213a91f663406cdcb412bdf46958abcbb64f0578f077b3d91bd size: 948 The push refers to a repository [localhost:32776/kubevirt/vm-killer] 68a997c47b9c: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/disks-images-provider 68a997c47b9c: Pushed devel: digest: sha256:ef134d074f73e4e1c9fba60d033d14d8e6a1e342b9d1f31aecfeba85db74ae6b size: 740 The push refers to a repository [localhost:32776/kubevirt/registry-disk-v1alpha] 0905ff81ba68: Preparing 0be79cca88bb: Preparing 25edbec0eaea: Preparing 0905ff81ba68: Pushed 0be79cca88bb: Pushed 25edbec0eaea: Pushed devel: digest: sha256:ed0bd481160a0cede386b5ded1a24962ab205ad990648364a4c5ac80a5aabcb4 size: 948 The push refers to a repository [localhost:32776/kubevirt/cirros-registry-disk-demo] d96d9d4058bd: Preparing 0905ff81ba68: Preparing 0be79cca88bb: Preparing 25edbec0eaea: Preparing 0905ff81ba68: Mounted from kubevirt/registry-disk-v1alpha 0be79cca88bb: Mounted from kubevirt/registry-disk-v1alpha 25edbec0eaea: Mounted from kubevirt/registry-disk-v1alpha d96d9d4058bd: Pushed devel: digest: sha256:3eda76ad2876dce8ed05cf4f1420a6b4deb466f1748a3cd869284d4e8b0a5c57 size: 1160 The push refers to a repository [localhost:32776/kubevirt/fedora-cloud-registry-disk-demo] 77e4962ee648: Preparing 0905ff81ba68: Preparing 0be79cca88bb: Preparing 25edbec0eaea: Preparing 0905ff81ba68: Mounted from kubevirt/cirros-registry-disk-demo 25edbec0eaea: Mounted from kubevirt/cirros-registry-disk-demo 0be79cca88bb: Mounted from kubevirt/cirros-registry-disk-demo 77e4962ee648: Pushed devel: digest: sha256:cde8bb7baac55d26dc35f37051992f5fdf13a7abbcb0091ba2a02e1caba04e3c size: 1161 The push refers to a repository [localhost:32776/kubevirt/alpine-registry-disk-demo] 3d4d18d8047e: Preparing 0905ff81ba68: Preparing 0be79cca88bb: Preparing 25edbec0eaea: Preparing 25edbec0eaea: Waiting 0905ff81ba68: Mounted from kubevirt/fedora-cloud-registry-disk-demo 0be79cca88bb: Mounted from kubevirt/fedora-cloud-registry-disk-demo 25edbec0eaea: Mounted from kubevirt/fedora-cloud-registry-disk-demo 3d4d18d8047e: Pushed devel: digest: sha256:5235b3032dde81c133db7b34eeaf37456ab6bee389f98c7e9016a4d2cd5396ee size: 1160 The push refers to a repository [localhost:32776/kubevirt/subresource-access-test] dfc61836ec00: Preparing f11f8a160bfe: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/vm-killer f11f8a160bfe: Pushed dfc61836ec00: Pushed devel: digest: sha256:71e928afaa2631bccb0b342ec9191aba0cab4d75cce393433fe43420730a4d9b size: 948 The push refers to a repository [localhost:32776/kubevirt/winrmcli] 19038f244d65: Preparing 40d75932eef1: Preparing 8acbb2baad2c: Preparing 891e1e4ef82a: Preparing 891e1e4ef82a: Mounted from kubevirt/subresource-access-test 19038f244d65: Pushed 8acbb2baad2c: Pushed 40d75932eef1: Pushed devel: digest: sha256:15709188bf181f6c2774394c3a01e79bc16d01837eb08f8a2f58dbe1c3bdea97 size: 1165 The push refers to a repository [localhost:32776/kubevirt/example-hook-sidecar] 8bb4b4a7b7a3: Preparing 39bae602f753: Preparing 8bb4b4a7b7a3: Pushed 39bae602f753: Pushed devel: digest: sha256:3037181d36600acd84994c363509f9726c2f6da6d083043d157ebd7677e26f85 size: 740 make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' Done ./cluster/clean.sh + source hack/common.sh ++++ dirname 
'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-55-g1a15a6b ++ KUBEVIRT_VERSION=v0.7.0-55-g1a15a6b + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32776/kubevirt +++ 
manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Cleaning up ...' Cleaning up ... + cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers + grep foregroundDeleteVirtualMachine + read p error: the server doesn't have a resource type "vmis" + _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0 No resources found + namespaces=(default ${namespace}) + for i in '${namespaces[@]}' + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete deployment -l kubevirt.io No resources found + _kubectl -n default delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rs -l kubevirt.io No resources found + _kubectl -n default delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete services -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n default delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete secrets -l kubevirt.io No resources found + _kubectl -n default delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pv -l kubevirt.io No resources found + _kubectl -n default delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pvc -l kubevirt.io No resources found + _kubectl -n default delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete ds -l kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n default 
delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pods -l kubevirt.io No resources found + _kubectl -n default delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n default delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rolebinding -l kubevirt.io No resources found + _kubectl -n default delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete roles -l kubevirt.io No resources found + _kubectl -n default delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterroles -l kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + for i in '${namespaces[@]}' + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete services -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n kube-system delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n 
kube-system delete secrets -l kubevirt.io No resources found + _kubectl -n kube-system delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pv -l kubevirt.io No resources found + _kubectl -n kube-system delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pvc -l kubevirt.io No resources found + _kubectl -n kube-system delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete ds -l kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n kube-system delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pods -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete roles -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterroles -l kubevirt.io No resources found + _kubectl -n kube-system delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ wc -l ++ cluster/os-3.10.0/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + sleep 2 + echo Done Done ./cluster/deploy.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ 
TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release1 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release1 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-55-g1a15a6b ++ KUBEVIRT_VERSION=v0.7.0-55-g1a15a6b + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:50a4b8ee3e07d592e7e4fbf3eb1401980a5947499dfdc3d847c085b5775aaa9a ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test cmd/example-hook-sidecar' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli cmd/example-hook-sidecar' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32776/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Deploying ...' Deploying ... 
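The trace above shows how hack/config.sh layers its settings before the deploy starts: the defaults set docker_tag=latest and master_ip=192.168.200.2, and the os-3.10.0 provider file then overrides them to devel, 127.0.0.1, and localhost:32776/kubevirt. A condensed sketch of that layering, assuming the file names as logged (the inline value comments repeat what the trace printed):

# Rough shape of the layered configuration traced above; hack/config-local.sh
# is only sourced when present, as the `test -f` lines in the trace show.
unset binaries docker_images docker_prefix docker_tag manifest_templates \
      master_ip network_provider kubeconfig manifest_docker_prefix namespace
source hack/config-default.sh                    # docker_tag=latest, master_ip=192.168.200.2
test -f hack/config-provider-os-3.10.0.sh && \
    source hack/config-provider-os-3.10.0.sh     # docker_tag=devel, docker_prefix=localhost:32776/kubevirt
test -f hack/config-local.sh && source hack/config-local.sh
export binaries docker_images docker_prefix docker_tag manifest_templates \
       master_ip network_provider kubeconfig namespace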
+ [[ -z openshift-3.10-release ]] + [[ openshift-3.10-release =~ .*-dev ]] + [[ openshift-3.10-release =~ .*-release ]] + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]] + continue + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]] + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml clusterrole.rbac.authorization.k8s.io "kubevirt.io:admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:edit" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:view" created serviceaccount "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver-auth-delegator" created rolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created role.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-controller" created serviceaccount "kubevirt-controller" created serviceaccount "kubevirt-privileged" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller-cluster-admin" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-privileged-cluster-admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:default" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt.io:default" created service "virt-api" created deployment.extensions "virt-api" created deployment.extensions "virt-controller" created daemonset.extensions "virt-handler" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstances.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancereplicasets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancepresets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachines.kubevirt.io" created + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R persistentvolumeclaim "disk-alpine" created persistentvolume "host-path-disk-alpine" created persistentvolumeclaim "disk-custom" created persistentvolume "host-path-disk-custom" created daemonset.extensions "disks-images-provider" created serviceaccount "kubevirt-testing" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-testing-cluster-admin" created + [[ os-3.10.0 =~ os-* ]] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n 
kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"] + _kubectl adm policy add-scc-to-user privileged admin + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged admin scc "privileged" added to: ["admin"] + echo Done Done + namespaces=(kube-system default) + [[ kube-system != \k\u\b\e\-\s\y\s\t\e\m ]] + timeout=300 + sample=30 + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n 'disks-images-provider-c94g5 0/1 Pending 0 1s disks-images-provider-pnkd5 0/1 Pending 0 1s virt-api-7d79764579-jtxcb 0/1 ContainerCreating 0 3s virt-api-7d79764579-n5pn2 0/1 ContainerCreating 0 3s virt-controller-7d57d96b65-4mh8j 0/1 ContainerCreating 0 3s virt-controller-7d57d96b65-frrc5 0/1 ContainerCreating 0 3s virt-handler-c56fw 0/1 ContainerCreating 0 3s virt-handler-h7qb6 0/1 ContainerCreating 0 3s' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... + kubectl get pods -n kube-system --no-headers + cluster/kubectl.sh get pods -n kube-system --no-headers + grep -v Running disks-images-provider-c94g5 0/1 Pending 0 2s disks-images-provider-pnkd5 0/1 ContainerCreating 0 2s virt-api-7d79764579-jtxcb 0/1 ContainerCreating 0 4s virt-api-7d79764579-n5pn2 0/1 ContainerCreating 0 4s virt-controller-7d57d96b65-4mh8j 0/1 ContainerCreating 0 4s virt-controller-7d57d96b65-frrc5 0/1 ContainerCreating 0 4s virt-handler-c56fw 0/1 ContainerCreating 0 4s virt-handler-h7qb6 0/1 ContainerCreating 0 4s + sleep 30 + current_time=30 + '[' 30 -gt 300 ']' ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n 'disks-images-provider-c94g5 0/1 ContainerCreating 0 38s disks-images-provider-pnkd5 0/1 ContainerCreating 0 38s virt-api-7d79764579-n5pn2 0/1 Error 0 40s' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... 
+ kubectl get pods -n kube-system --no-headers + grep -v Running + cluster/kubectl.sh get pods -n kube-system --no-headers disks-images-provider-c94g5 0/1 ContainerCreating 0 39s disks-images-provider-pnkd5 0/1 ContainerCreating 0 39s virt-api-7d79764579-n5pn2 0/1 Error 0 41s + sleep 30 + current_time=60 + '[' 60 -gt 300 ']' ++ kubectl get pods -n kube-system --no-headers ++ grep -v Running ++ cluster/kubectl.sh get pods -n kube-system --no-headers + '[' -n '' ']' + current_time=0 ++ grep false ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + '[' -n '' ']' + kubectl get pods -n kube-system + cluster/kubectl.sh get pods -n kube-system NAME READY STATUS RESTARTS AGE disks-images-provider-c94g5 1/1 Running 0 1m disks-images-provider-pnkd5 1/1 Running 0 1m master-api-node01 1/1 Running 1 10d master-controllers-node01 1/1 Running 2 10d master-etcd-node01 1/1 Running 1 10d virt-api-7d79764579-jtxcb 1/1 Running 0 1m virt-api-7d79764579-n5pn2 1/1 Running 1 1m virt-controller-7d57d96b65-4mh8j 1/1 Running 0 1m virt-controller-7d57d96b65-frrc5 1/1 Running 0 1m virt-handler-c56fw 1/1 Running 0 1m virt-handler-h7qb6 1/1 Running 0 1m + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n default --no-headers ++ cluster/kubectl.sh get pods -n default --no-headers ++ grep -v Running + '[' -n '' ']' + current_time=0 ++ kubectl get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false ++ cluster/kubectl.sh get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + '[' -n '' ']' + kubectl get pods -n default + cluster/kubectl.sh get pods -n default NAME READY STATUS RESTARTS AGE docker-registry-1-rl562 1/1 Running 1 10d registry-console-1-rw9zf 1/1 Running 1 10d router-1-6cch9 1/1 Running 1 10d + kubectl version + cluster/kubectl.sh version oc v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 features: Basic-Auth GSSAPI Kerberos SPNEGO Server https://127.0.0.1:32773 openshift v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 + ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + [[ -d /home/nfs/images/windows2016 ]] + FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + make functest hack/dockerized "hack/build-func-tests.sh" sha256:eac86de70a4e6cb392340c5eb3c9e29aa4eee64229c68e6e8a3ba9514fb773e5 go version go1.10 linux/amd64 Waiting for rsyncd to be ready go version go1.10 linux/amd64 Compiling tests... 
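The readiness gate traced above polls each namespace every 30 seconds (sample=30) up to a 300-second budget (timeout=300), repeating while any pod still matches `grep -v Running`, and only then moves on to the per-container ready check and the final `kubectl get pods` listing. A condensed sketch of that first loop, using the variable names from the trace (the exit-on-timeout handling here is an assumption):

# Condensed form of the pod wait loop visible above; namespaces, interval, and
# timeout follow the log, error handling is illustrative only.
timeout=300; sample=30
for ns in kube-system default; do
    current_time=0
    while [ -n "$(cluster/kubectl.sh get pods -n "$ns" --no-headers | grep -v Running)" ]; do
        echo "Waiting for kubevirt pods to enter the Running state ..."
        cluster/kubectl.sh get pods -n "$ns" --no-headers | grep -v Running
        sleep "$sample"
        current_time=$((current_time + sample))
        [ "$current_time" -gt "$timeout" ] && exit 1
    done
done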
compiled tests.test hack/functests.sh Running Suite: Tests Suite ========================== Random Seed: 1531915733 Will run 140 of 140 specs •• ------------------------------ • [SLOW TEST:22.275 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should update VirtualMachine once VMIs are up /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:195 ------------------------------ • [SLOW TEST:10.262 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should remove VirtualMachineInstance once the VMI is marked for deletion /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:204 ------------------------------ • ------------------------------ • [SLOW TEST:36.497 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if it gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:245 ------------------------------ • [SLOW TEST:47.472 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:265 ------------------------------ • [SLOW TEST:43.472 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should stop VirtualMachineInstance if running set to false /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:325 ------------------------------ • [SLOW TEST:180.339 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should start and stop VirtualMachineInstance multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:333 ------------------------------ • [SLOW TEST:50.381 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should not update the VirtualMachineInstance spec if Running /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:346 ------------------------------ • [SLOW TEST:209.773 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should survive guest shutdown, multiple times /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:387 ------------------------------ • [SLOW TEST:17.333 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should start a VirtualMachineInstance once /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:436 ------------------------------ • [SLOW TEST:37.428 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:435 should stop a VirtualMachineInstance once 
/root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:467 ------------------------------ • [SLOW TEST:61.487 seconds] Health Monitoring /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:37 A VirtualMachineInstance with a watchdog device /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:56 should be shut down when the watchdog expires /root/go/src/kubevirt.io/kubevirt/tests/vmi_monitoring_test.go:57 ------------------------------ • [SLOW TEST:49.663 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userDataBase64 source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81 should have cloud-init data /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:82 ------------------------------ • [SLOW TEST:174.420 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userDataBase64 source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:81 with injected ssh-key /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:92 should have ssh-key under authorized keys /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:93 ------------------------------ • [SLOW TEST:62.232 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 with cloudInitNoCloud userData source /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:118 should process provided cloud-init data /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:119 ------------------------------ • [SLOW TEST:51.059 seconds] CloudInit UserData /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:46 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:80 should take user-data from k8s secret /root/go/src/kubevirt.io/kubevirt/tests/vmi_userdata_test.go:162 ------------------------------ • [SLOW TEST:126.768 seconds] Networking /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48 should be able to reach /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 the Inbound VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • Pod name: disks-images-provider-c94g5 Pod phase: Running copy all images to host mount directory Pod name: disks-images-provider-pnkd5 Pod phase: Running copy all images to host mount directory Pod name: virt-api-7d79764579-jtxcb Pod phase: Running 2018/07/18 12:29:42 http: TLS handshake error from 10.129.0.1:42238: EOF 2018/07/18 12:29:52 http: TLS handshake error from 10.129.0.1:42248: EOF 2018/07/18 12:30:02 http: TLS handshake error from 10.129.0.1:42256: EOF 2018/07/18 12:30:12 http: TLS handshake error from 10.129.0.1:42264: EOF 2018/07/18 12:30:22 http: TLS handshake error from 10.129.0.1:42272: EOF 2018/07/18 12:30:32 http: TLS handshake error from 10.129.0.1:42280: EOF 2018/07/18 12:30:42 http: TLS handshake error from 10.129.0.1:42288: EOF 2018/07/18 12:30:52 http: TLS handshake error from 10.129.0.1:42296: EOF 2018/07/18 12:31:02 http: TLS handshake error from 10.129.0.1:42304: EOF 2018/07/18 12:31:12 http: TLS handshake error 
from 10.129.0.1:42312: EOF 2018/07/18 12:31:22 http: TLS handshake error from 10.129.0.1:42320: EOF level=info timestamp=2018-07-18T12:31:26.611414Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/ proto=HTTP/1.1 statusCode=404 contentLength=19 2018/07/18 12:31:32 http: TLS handshake error from 10.129.0.1:42328: EOF 2018/07/18 12:31:42 http: TLS handshake error from 10.129.0.1:42336: EOF 2018/07/18 12:31:52 http: TLS handshake error from 10.129.0.1:42344: EOF Pod name: virt-api-7d79764579-n5pn2 Pod phase: Running level=info timestamp=2018-07-18T12:31:11.351067Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-18T12:31:12.482934Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/18 12:31:17 http: TLS handshake error from 10.129.0.1:38606: EOF level=info timestamp=2018-07-18T12:31:19.917238Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-18T12:31:23.844882Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/18 12:31:27 http: TLS handshake error from 10.129.0.1:38614: EOF level=info timestamp=2018-07-18T12:31:29.963218Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/18 12:31:37 http: TLS handshake error from 10.129.0.1:38622: EOF level=info timestamp=2018-07-18T12:31:40.012394Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-18T12:31:40.629245Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/openapi/v2 proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-18T12:31:40.630376Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url=/swagger.json proto=HTTP/2.0 statusCode=404 contentLength=19 level=info timestamp=2018-07-18T12:31:41.410105Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 level=info timestamp=2018-07-18T12:31:42.538750Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 2018/07/18 12:31:47 http: TLS handshake error from 10.129.0.1:38630: EOF level=info timestamp=2018-07-18T12:31:50.060224Z pos=filter.go:46 component=virt-api remoteAddress=10.128.0.1 username=- method=GET url="/apis/subresources.kubevirt.io/v1alpha2?timeout=32s" proto=HTTP/2.0 statusCode=200 contentLength=136 Pod name: virt-controller-7d57d96b65-4mh8j Pod phase: Running level=info timestamp=2018-07-18T12:21:49.811719Z pos=preset.go:165 component=virt-controller service=http 
namespace=kubevirt-test-default name=testvmin4xc8 kind= uid=25b840d6-8a85-11e8-b794-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-18T12:21:49.893276Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmin4xc8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmin4xc8" level=info timestamp=2018-07-18T12:24:44.240294Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7p9w8 kind= uid=8daec4aa-8a85-11e8-b794-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-18T12:24:44.240496Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi7p9w8 kind= uid=8daec4aa-8a85-11e8-b794-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-18T12:24:44.380192Z pos=vmi.go:157 component=virt-controller service=http reason="Operation cannot be fulfilled on virtualmachineinstances.kubevirt.io \"testvmi7p9w8\": the object has been modified; please apply your changes to the latest version and try again" msg="reenqueuing VirtualMachineInstance kubevirt-test-default/testvmi7p9w8" level=info timestamp=2018-07-18T12:25:46.502842Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi65zl9 kind= uid=b2cadd1b-8a85-11e8-b794-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-18T12:25:46.502991Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi65zl9 kind= uid=b2cadd1b-8a85-11e8-b794-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-18T12:26:37.527786Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmis42dd kind= uid=d135bf88-8a85-11e8-b794-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-18T12:26:37.527940Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmis42dd kind= uid=d135bf88-8a85-11e8-b794-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-18T12:26:37.540581Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-18T12:26:37.540687Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-18T12:26:37.565249Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqttwv kind= uid=d139b59d-8a85-11e8-b794-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-18T12:26:37.565345Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmiqttwv kind= uid=d139b59d-8a85-11e8-b794-525500d15501 msg="Marking VirtualMachineInstance as initialized" level=info timestamp=2018-07-18T12:26:37.576999Z pos=preset.go:139 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigtmz4 kind= 
uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Initializing VirtualMachineInstance" level=info timestamp=2018-07-18T12:26:37.577131Z pos=preset.go:165 component=virt-controller service=http namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Marking VirtualMachineInstance as initialized" Pod name: virt-controller-7d57d96b65-frrc5 Pod phase: Running level=info timestamp=2018-07-18T12:05:45.141933Z pos=application.go:174 component=virt-controller service=http action=listening interface=0.0.0.0 port=8182 Pod name: virt-handler-c56fw Pod phase: Running level=info timestamp=2018-07-18T12:05:47.380538Z pos=cache.go:151 component=virt-handler msg="Synchronizing domains" level=info timestamp=2018-07-18T12:05:47.479201Z pos=device_controller.go:131 component=virt-handler msg="Starting device plugin controller" level=info timestamp=2018-07-18T12:05:47.510877Z pos=device_controller.go:125 component=virt-handler msg="kvm device plugin started" level=info timestamp=2018-07-18T12:05:47.539634Z pos=device_controller.go:125 component=virt-handler msg="tun device plugin started" level=info timestamp=2018-07-18T12:26:57.855777Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-18T12:26:58.907790Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type ADDED" level=info timestamp=2018-07-18T12:26:58.918422Z pos=vm.go:657 component=virt-handler namespace=kubevirt-test-default name=testvmigtmz4 kind=Domain uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Domain is in state Paused reason Unknown" level=info timestamp=2018-07-18T12:26:59.482770Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-18T12:26:59.486830Z pos=vm.go:688 component=virt-handler namespace=kubevirt-test-default name=testvmigtmz4 kind=Domain uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Domain is in state Running reason Unknown" level=info timestamp=2018-07-18T12:26:59.533667Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-18T12:26:59.539971Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-18T12:26:59.540149Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="No update processing required" level=info timestamp=2018-07-18T12:26:59.576005Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-18T12:26:59.577885Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-18T12:26:59.592443Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Synchronization loop succeeded." 
Pod name: virt-handler-h7qb6 Pod phase: Running level=info timestamp=2018-07-18T12:27:00.173473Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmis42dd kind= uid=d135bf88-8a85-11e8-b794-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-18T12:27:00.273863Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmis42dd kind= uid=d135bf88-8a85-11e8-b794-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-18T12:27:00.580822Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmis42dd kind= uid=d135bf88-8a85-11e8-b794-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-18T12:27:00.725252Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type ADDED" level=info timestamp=2018-07-18T12:27:00.803736Z pos=vm.go:657 component=virt-handler namespace=kubevirt-test-default name=testvmi5n288 kind=Domain uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Domain is in state Paused reason Unknown" level=info timestamp=2018-07-18T12:27:01.615867Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-18T12:27:02.105083Z pos=vm.go:392 component=virt-handler namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="No update processing required" level=info timestamp=2018-07-18T12:27:02.107924Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-18T12:27:02.108050Z pos=vm.go:688 component=virt-handler namespace=kubevirt-test-default name=testvmi5n288 kind=Domain uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Domain is in state Running reason Unknown" level=info timestamp=2018-07-18T12:27:02.585187Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-18T12:27:02.616624Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-18T12:27:02.628398Z pos=server.go:75 component=virt-handler msg="Received Domain Event of type MODIFIED" level=info timestamp=2018-07-18T12:27:02.730696Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Synchronization loop succeeded." level=info timestamp=2018-07-18T12:27:02.730846Z pos=vm.go:389 component=virt-handler namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Processing vmi update" level=info timestamp=2018-07-18T12:27:02.877775Z pos=vm.go:413 component=virt-handler namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Synchronization loop succeeded." 
Pod name: virt-launcher-testvmi5n288-p4jjf Pod phase: Running
level=info timestamp=2018-07-18T12:27:00.653669Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-18T12:27:00.674614Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID ce170281-b2a5-4962-a6ad-f45338888b8b"
level=info timestamp=2018-07-18T12:27:00.674938Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-18T12:27:00.804757Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:27:00.991446Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-18T12:27:01.065813Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-18T12:27:01.082597Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Domain started."
level=info timestamp=2018-07-18T12:27:01.083916Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-18T12:27:01.680735Z pos=monitor.go:222 component=virt-launcher msg="Found PID for ce170281-b2a5-4962-a6ad-f45338888b8b: 200"
level=info timestamp=2018-07-18T12:27:02.109355Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:27:02.109542Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-18T12:27:02.182822Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-18T12:27:02.631171Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-18T12:27:02.631284Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:27:02.739587Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmi5n288 kind= uid=d137cecc-8a85-11e8-b794-525500d15501 msg="Synced vmi"
Pod name: virt-launcher-testvmigtmz4-w26lx Pod phase: Running
level=info timestamp=2018-07-18T12:26:58.220888Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-18T12:26:58.873223Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-18T12:26:58.918888Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:26:59.150942Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 0716ccf2-48b3-4da0-9df6-d1de94092fe0"
level=info timestamp=2018-07-18T12:26:59.157859Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-18T12:26:59.448689Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-18T12:26:59.480776Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-18T12:26:59.484408Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:26:59.499343Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-18T12:26:59.525974Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Domain started."
level=info timestamp=2018-07-18T12:26:59.528133Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-18T12:26:59.531556Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-18T12:26:59.534917Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:26:59.585031Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmigtmz4 kind= uid=d13c3f68-8a85-11e8-b794-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-18T12:27:00.161731Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 0716ccf2-48b3-4da0-9df6-d1de94092fe0: 197"
Pod name: virt-launcher-testvmiqttwv-ndl6r Pod phase: Running
level=info timestamp=2018-07-18T12:26:58.101987Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-18T12:26:58.856857Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-18T12:26:58.884181Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 921b46c0-b6de-4620-a75c-ebd0684efd34"
level=info timestamp=2018-07-18T12:26:58.886145Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-18T12:26:58.897903Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:26:59.535654Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-18T12:26:59.595576Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-18T12:26:59.600791Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:26:59.610949Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-18T12:26:59.614001Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmiqttwv kind= uid=d139b59d-8a85-11e8-b794-525500d15501 msg="Domain started."
level=info timestamp=2018-07-18T12:26:59.615975Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiqttwv kind= uid=d139b59d-8a85-11e8-b794-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-18T12:26:59.686154Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-18T12:26:59.739779Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:26:59.769413Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmiqttwv kind= uid=d139b59d-8a85-11e8-b794-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-18T12:26:59.897672Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 921b46c0-b6de-4620-a75c-ebd0684efd34: 197"
Pod name: virt-launcher-testvmis42dd-8zwnp Pod phase: Running
level=info timestamp=2018-07-18T12:26:58.224671Z pos=client.go:136 component=virt-launcher msg="Libvirt event 0 with reason 0 received"
level=info timestamp=2018-07-18T12:26:58.937642Z pos=client.go:119 component=virt-launcher msg="domain status: 3:11"
level=info timestamp=2018-07-18T12:26:58.997996Z pos=virt-launcher.go:215 component=virt-launcher msg="Detected domain with UUID 5ed41ac4-9657-4cd1-b093-647633ece6fa"
level=info timestamp=2018-07-18T12:26:59.002969Z pos=monitor.go:253 component=virt-launcher msg="Monitoring loop: rate 1s start timeout 5m0s"
level=info timestamp=2018-07-18T12:26:59.027597Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:26:59.699686Z pos=client.go:136 component=virt-launcher msg="Libvirt event 4 with reason 0 received"
level=info timestamp=2018-07-18T12:26:59.749997Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-18T12:26:59.753055Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:26:59.771978Z pos=client.go:136 component=virt-launcher msg="Libvirt event 2 with reason 0 received"
level=info timestamp=2018-07-18T12:26:59.825539Z pos=manager.go:188 component=virt-launcher namespace=kubevirt-test-default name=testvmis42dd kind= uid=d135bf88-8a85-11e8-b794-525500d15501 msg="Domain started."
level=info timestamp=2018-07-18T12:26:59.828369Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmis42dd kind= uid=d135bf88-8a85-11e8-b794-525500d15501 msg="Synced vmi"
level=info timestamp=2018-07-18T12:26:59.844702Z pos=client.go:119 component=virt-launcher msg="domain status: 1:1"
level=info timestamp=2018-07-18T12:26:59.999840Z pos=client.go:145 component=virt-launcher msg="processed event"
level=info timestamp=2018-07-18T12:27:00.010641Z pos=monitor.go:222 component=virt-launcher msg="Found PID for 5ed41ac4-9657-4cd1-b093-647633ece6fa: 193"
level=info timestamp=2018-07-18T12:27:00.279403Z pos=server.go:74 component=virt-launcher namespace=kubevirt-test-default name=testvmis42dd kind= uid=d135bf88-8a85-11e8-b794-525500d15501 msg="Synced vmi"
------------------------------
• Failure [187.321 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be able to reach
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    the Inbound VirtualMachineInstance with custom MAC address [It]
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46

    Expected error:
        : 180000000000 expect: timer expired after 180 seconds
    not to have occurred

    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:224
------------------------------
STEP: checking br1 MTU inside the pod
level=info timestamp=2018-07-18T12:28:46.839688Z pos=vmi_networking_test.go:185 component=tests msg="4: br1: mtu 1450 qdisc noqueue state UP group default \n link/ether 0a:58:0a:b1:a4:39 brd ff:ff:ff:ff:ff:ff\n inet 169.254.75.86/32 brd 169.254.75.86 scope global br1\n valid_lft forever preferred_lft forever\n inet6 fe80::858:aff:feb1:a439/64 scope link \n valid_lft forever preferred_lft forever\n"
STEP: checking eth0 MTU inside the VirtualMachineInstance
level=info timestamp=2018-07-18T12:28:47.347693Z pos=vmi_networking_test.go:205 component=tests msg="[{1 \r\n$ [$ ]} {3 ip address show eth0\r\n2: eth0: mtu 1450 qdisc pfifo_fast qlen 1000\r\n [2: eth0: mtu 1450 qdisc pfifo_fast qlen 1000\r\n]} {5 link/ether 0a:58:0a:81:00: [0]}]"
STEP: checking the VirtualMachineInstance can send MTU sized frames to another VirtualMachineInstance
level=info timestamp=2018-07-18T12:31:52.995849Z pos=utils.go:1190 component=tests namespace=kubevirt-test-default name=testvmi5n288 kind=VirtualMachineInstance uid=d137cecc-8a85-11e8-b794-525500d15501 msg="[{1 \r\n\r\n$ [$ ]} {3 ping 10.128.0.23 -c 1 -w 5 -s 1422\r\nPING 10.128.0.23 (10.128.0.23): 1422 data bytes\r\n\r\n--- 10.128.0.23 ping statistics ---\r\n5 packets transmitted, 0 packets received, 100% packet loss\r\n$ [$ ]} {5 echo $?\r\n1\r\n$ []}]"
••
------------------------------
• [SLOW TEST:5.121 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  should be reachable via the propagated IP from a Pod
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    on a different node from Pod
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
••
------------------------------
• [SLOW TEST:5.430 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  with a service matching the vmi exposed
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:283
    should be able to reach the vmi based on labels specified on the vmi
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:303
------------------------------
• [SLOW TEST:5.233 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  with a service matching the vmi exposed
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:283
    should fail to reach the vmi if an invalid servicename is used
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:314
------------------------------
• [SLOW TEST:5.246 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  with a subdomain and a headless service given
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:330
    should be able to reach the vmi via its unique fully qualified domain name
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:353
------------------------------
• [SLOW TEST:56.413 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom interface model
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:379
    should expose the right device type to the guest
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:380
------------------------------
•
------------------------------
• [SLOW TEST:57.748 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom MAC address
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:413
    should configure custom MAC address
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:414
------------------------------
• [SLOW TEST:55.327 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom MAC address in non-conventional format
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:425
    should configure custom MAC address
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:426
------------------------------
• [SLOW TEST:60.614 seconds]
Networking
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:48
  VirtualMachineInstance with custom MAC address and slirp interface
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:438
    should configure custom MAC address
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:439
------------------------------
••••
------------------------------
• [SLOW TEST:51.117 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    with Alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71
      should be successfully started
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        with Disk PVC
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:49.730 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    with Alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71
      should be successfully started
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        with CDRom PVC
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:125.078 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    with Alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71
      should be successfully started and stopped multiple times
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        with Disk PVC
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:126.426 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    with Alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:71
      should be successfully started and stopped multiple times
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        with CDRom PVC
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:53.846 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With an emptyDisk defined
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:113
      should create a writeable emptyDisk with the right capacity
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:115
------------------------------
• [SLOW TEST:52.964 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With an emptyDisk defined and a specified serial number
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:163
      should create a writeable emptyDisk with the specified serial number
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:165
------------------------------
• [SLOW TEST:49.463 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With ephemeral alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205
      should be successfully started
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:207
------------------------------
• [SLOW TEST:117.131 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With ephemeral alpine PVC
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:205
      should not persist data
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:218
------------------------------
• [SLOW TEST:144.500 seconds]
Storage
/root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:46
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:70
    With VirtualMachineInstance with two PVCs
    /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:266
      should start vmi multiple times
      /root/go/src/kubevirt.io/kubevirt/tests/storage_test.go:278
------------------------------
•
------------------------------
• [SLOW TEST:52.323 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
      with a cirros image
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66
        should return that we are running cirros
        /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:67
------------------------------
• [SLOW TEST:57.900 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
      with a fedora image
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:76
        should return that we are running fedora
        /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:77
------------------------------
• [SLOW TEST:51.298 seconds]
Console
/root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64
    with a serial console
    /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65
      should be able to reconnect to console multiple times
      /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:86
------------------------------
• [SLOW TEST:83.250 seconds]
RegistryDisk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41
  Starting and stopping the same VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:90
    with ephemeral registry disk
    /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:91
      should success multiple times
      /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:92
------------------------------
• [SLOW TEST:16.336 seconds]
RegistryDisk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41
  Starting a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:111
    with ephemeral registry disk
    /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:112
      should not modify the spec on status update
      /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:113
------------------------------
• [SLOW TEST:38.866 seconds]
RegistryDisk
/root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41
  Starting multiple VMIs
  /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:129
    with ephemeral registry disk
    /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:130
      should success
      /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:131
------------------------------
••••••••••••
------------------------------
• [SLOW TEST:5.041 seconds]
Subresource Api
/root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37
  Rbac Authorization
  /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:48
    Without permissions
    /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:56
      should not be able to access subresource endpoint
      /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:57
------------------------------
••
------------------------------
• [SLOW TEST:106.513 seconds]
Slirp
/root/go/src/kubevirt.io/kubevirt/tests/vmi_slirp_interface_test.go:39
  should be able to
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    VirtualMachineInstance with slirp interface
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
•Service cluster-ip-vm successfully exposed for virtualmachineinstance testvmig6q29
------------------------------
• [SLOW TEST:59.169 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on a VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61
    Expose ClusterIP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:68
      Should expose a Cluster IP service on a VM and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:71
------------------------------
Service node-port-vm successfully exposed for virtualmachineinstance testvmig6q29
• [SLOW TEST:9.183 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on a VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:61
    Expose NodePort service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:98
      Should expose a NodePort service on a VM and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:103
------------------------------
Service cluster-ip-udp-vm successfully exposed for virtualmachineinstance testvmimtkdf
• [SLOW TEST:60.905 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose UDP service on a VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:140
    Expose ClusterIP UDP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:147
      Should expose a ClusterIP service on a VM and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:151
------------------------------
Service node-port-udp-vm successfully exposed for virtualmachineinstance testvmimtkdf
• [SLOW TEST:10.274 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose UDP service on a VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:140
    Expose NodePort UDP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:179
      Should expose a NodePort service on a VM and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:184
------------------------------
Service cluster-ip-vmrs successfully exposed for vmirs replicasetz4pnd
• [SLOW TEST:76.042 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on a VM replica set
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:227
    Expose ClusterIP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:260
      Should create a ClusterIP service on VMRS and connect to it
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:264
------------------------------
Service cluster-ip-ovm successfully exposed for virtualmachine testvmiswdw8
• [SLOW TEST:62.392 seconds]
Expose
/root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:53
  Expose service on an Offline VM
  /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:292
    Expose ClusterIP service
    /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:336
      Connect to ClusterIP services that was set when VM was offline
      /root/go/src/kubevirt.io/kubevirt/tests/expose_test.go:337
------------------------------
• [SLOW TEST:8.087 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should scale
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    to three, to two and then to zero replicas
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:17.983 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should scale
  /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
    to five, to six and then to zero replicas
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
••
------------------------------
• [SLOW TEST:21.202 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should update readyReplicas once VMIs are up
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:157
------------------------------
• [SLOW TEST:8.531 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should remove VMIs once it is marked for deletion
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:169
------------------------------
••
------------------------------
• [SLOW TEST:7.505 seconds]
VirtualMachineInstanceReplicaSet
/root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:46
  should remove the finished VM
  /root/go/src/kubevirt.io/kubevirt/tests/replicaset_test.go:279
------------------------------
• [SLOW TEST:35.629 seconds]
LeaderElection
/root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:43
  Start a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:53
    when the controller pod is not running
    /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:54
      should success
      /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:55
------------------------------
• [SLOW TEST:8.183 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
  With default kubevirt service accounts
  /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
    should verify permissions are correct for view, edit, and admin
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      given a vmi
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:7.876 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
  With default kubevirt service accounts
  /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
    should verify permissions are correct for view, edit, and admin
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      given an vm
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:7.787 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
  With default kubevirt service accounts
  /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
    should verify permissions are correct for view, edit, and admin
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      given a vmi preset
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:7.852 seconds]
User Access
/root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33
  With default kubevirt service accounts
  /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41
    should verify permissions are correct for view, edit, and admin
    /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
      given a vmi replica set
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.004 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  should succeed to start a vmi [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:133
    Skip Windows tests that requires PVC disk-windows
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  should succeed to stop a running vmi [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:139
    Skip Windows tests that requires PVC disk-windows
    /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with winrm connection [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150
    should have correct UUID
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:192
      Skip Windows tests that requires PVC disk-windows
      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with winrm connection [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150
    should have pod IP
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:208
      Skip Windows tests that requires PVC disk-windows
      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.003 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with kubectl command [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226
    should succeed to start a vmi
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:242
      Skip Windows tests that requires PVC disk-windows
      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.006 seconds]
Windows VirtualMachineInstance
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57
  with kubectl command [BeforeEach]
  /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226
    should succeed to stop a vmi
    /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:250
      Skip Windows tests that requires PVC disk-windows
      /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1342
------------------------------
• [SLOW TEST:19.181 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
  VMI definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
    with SM BIOS hook sidecar
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
      should successfully start with hook sidecar annotation
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:60
------------------------------
• [SLOW TEST:16.978 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
  VMI definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
    with SM BIOS hook sidecar
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
      should call Collect and OnDefineDomain on the hook sidecar
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:67
------------------------------
• [SLOW TEST:19.834 seconds]
HookSidecars
/root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:40
  VMI definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:58
    with SM BIOS hook sidecar
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:59
      should update domain XML with SM BIOS properties
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_hook_sidecar_test.go:83
------------------------------
•
------------------------------
• [SLOW TEST:17.223 seconds]
VNC
/root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:46
  A new VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:54
    with VNC connection
    /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:62
      should allow accessing the VNC device
      /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:64
------------------------------
••volumedisk0 compute
------------------------------
• [SLOW TEST:52.111 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with 3 CPU cores
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:56
      should report 3 cpu cores under guest OS
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:62
------------------------------
• [SLOW TEST:18.116 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with hugepages
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108
      should consume hugepages
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        hugepages-2Mi
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
S [SKIPPING] [0.214 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  VirtualMachineInstance definition
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55
    with hugepages
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:108
      should consume hugepages
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        hugepages-1Gi [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
        No node with hugepages hugepages-1Gi capacity
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:160
------------------------------
•
------------------------------
• [SLOW TEST:84.960 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238
    when CPU model defined
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:277
      should report defined CPU model
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:278
------------------------------
• [SLOW TEST:97.374 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  with CPU spec
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:238
    when CPU model not defined
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:305
      should report CPU model from libvirt capabilities
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:306
------------------------------
• [SLOW TEST:51.794 seconds]
Configurations
/root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:44
  New VirtualMachineInstance with all supported drives
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:326
    should have all the device nodes
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:349
------------------------------
•
------------------------------
• [SLOW TEST:17.655 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    should start it
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:76
------------------------------
• [SLOW TEST:17.065 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    should attach virt-launcher to it
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:82
------------------------------
••••
------------------------------
• [SLOW TEST:54.458 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    with boot order
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170
      should be able to boot from selected disk
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        Alpine as first boot
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:26.233 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    with boot order
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:170
      should be able to boot from selected disk
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        Cirros as first boot
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
------------------------------
• [SLOW TEST:14.929 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    with user-data
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201
      without k8s secret
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202
        should retry starting the VirtualMachineInstance
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:203
------------------------------
• [SLOW TEST:16.077 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    with user-data
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:201
      without k8s secret
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:202
        should log warning and proceed once the secret is there
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:233
------------------------------
• [SLOW TEST:49.476 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    when virt-launcher crashes
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:281
      should be stopped and have Failed phase
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:282
------------------------------
• [SLOW TEST:25.339 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    when virt-handler crashes
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:304
      should recover and continue management
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:305
------------------------------
• [SLOW TEST:10.344 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    when virt-handler is responsive
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:335
      should indicate that a node is ready for vmis
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:336
------------------------------
• [SLOW TEST:69.300 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    when virt-handler is not responsive
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:366
      the node controller should react
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:405
------------------------------
S [SKIPPING] [0.127 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    with non default namespace
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:458
      should log libvirt start and stop lifecycle events of the domain
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        kubevirt-test-default [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
        Skip log query tests for JENKINS ci test environment
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:463
------------------------------
S [SKIPPING] [0.076 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    with non default namespace
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:458
      should log libvirt start and stop lifecycle events of the domain
      /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92
        kubevirt-test-alternative [It]
        /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46
        Skip log query tests for JENKINS ci test environment
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:463
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.088 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:519
      should enable emulation in virt-launcher [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:531
        Software emulation is not enabled on this cluster
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:527
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.058 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:519
      should be reflected in domain XML [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:568
        Software emulation is not enabled on this cluster
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:527
------------------------------
S [SKIPPING] in Spec Setup (BeforeEach) [0.050 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Creating a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:70
    VirtualMachineInstance Emulation Mode
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:519
      should request a TUN device but not KVM [BeforeEach]
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:612
        Software emulation is not enabled on this cluster
        /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:527
------------------------------
••••
------------------------------
• [SLOW TEST:18.076 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Delete a VirtualMachineInstance's Pod
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:764
    should result in the VirtualMachineInstance moving to a finalized state
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:765
------------------------------
• [SLOW TEST:36.193 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Delete a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:796
    with an active pod.
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:797
      should result in pod being terminated
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:798
------------------------------
• [SLOW TEST:23.360 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Delete a VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:796
    with grace period greater than 0
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:821
      should run graceful shutdown
      /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:822
------------------------------
• [SLOW TEST:31.214 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Killed VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:873
    should be in Failed phase
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:874
------------------------------
• [SLOW TEST:25.582 seconds]
VMIlifecycle
/root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:48
  Killed VirtualMachineInstance
  /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:873
    should be left alone by virt-handler
    /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:901
------------------------------
Waiting for namespace kubevirt-test-default to be removed, this can take a while ...
Waiting for namespace kubevirt-test-alternative to be removed, this can take a while ...

Summarizing 1 Failure:

[Fail] Networking should be able to reach [It] the Inbound VirtualMachineInstance with custom MAC address
/root/go/src/kubevirt.io/kubevirt/tests/vmi_networking_test.go:224

Ran 128 of 140 Specs in 4105.793 seconds
FAIL! -- 127 Passed | 1 Failed | 0 Pending | 12 Skipped
--- FAIL: TestTests (4105.83s)
FAIL
make: *** [functest] Error 1
+ make cluster-down
./cluster/down.sh