+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release
+ [[ openshift-3.10-release =~ openshift-.* ]]
+ [[ openshift-3.10-release =~ .*-crio-.* ]]
+ export KUBEVIRT_PROVIDER=os-3.10.0
+ KUBEVIRT_PROVIDER=os-3.10.0
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT SIGINT SIGTERM SIGSTOP
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading .......
Downloading .......
Downloading .......
2018/07/03 02:52:34 Waiting for host: 192.168.66.102:22
2018/07/03 02:52:37 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/03 02:52:45 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/03 02:52:53 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/07/03 02:52:59 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: connection refused. Sleeping 5s
2018/07/03 02:53:04 Connected to tcp://192.168.66.102:22
+ systemctl stop origin-node.service
+ rm -rf /etc/origin/ /etc/etcd/ /var/lib/origin /var/lib/etcd/
++ docker ps -q
+ containers=
+ '[' -n '' ']'
++ docker ps -q -a
+ containers='380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8'
+ '[' -n '380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8' ']'
+ docker rm -f 380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8
380578eb6b01 04838495ca18 aef4dea680cc 46bea3472251 6251077554f3 fe2a0d7ec163 970722bcd3fd 1eb051c1b629 1052914cea3e e039c376e094 84eb0c833d7c 4e08778d9b1d 0a9a2fcf5f77 50aebcf65f7b 9faedfc6ec82 f59bf1b05c0b 2ec313cb7901 5a782498db8c fef39cf40a53 c8ca0d879d8b aabbef87da66 0ac6cd26ae28 d021d99166a4 ed496b2ca295 e69e08bd00a8 0ddad7779bb0 3ca535d6f49f 2fc9538787a8
2018/07/03 02:53:07 Waiting for host: 192.168.66.101:22
2018/07/03 02:53:11 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/03 02:53:19 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/03 02:53:27 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/03 02:53:35 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/07/03 02:53:40 Connected to tcp://192.168.66.101:22
+ inventory_file=/root/inventory
+ openshift_ansible=/root/openshift-ansible
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=1.44 ms
--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.447/1.447/1.447/0.000 ms
Found node02. Adding it to the inventory.
+ '[' 0 -ne 0 ']'
+ nodes_found=true
+ set -e
+ echo '192.168.66.102 node02'
+ echo 'Found node02. Adding it to the inventory.'
+ echo 'node02 openshift_node_group_name="node-config-compute" openshift_schedulable=true openshift_ip=192.168.66.102'
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable
--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms
+ '[' 1 -ne 0 ']'
+ break
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /root/openshift-ansible/playbooks/openshift-node/scaleup.yml
PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Evaluate oo_nodes_to_bootstrap] ******************************************
ok: [localhost] => (item=node02)
TASK [Add masters to oo_nodes_to_bootstrap] ************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)
PLAY [Ensure there are new_nodes] **********************************************
TASK [fail] ********************************************************************
skipping: [localhost]
TASK [fail] ********************************************************************
skipping: [localhost]
PLAY [Initialization Checkpoint Start] *****************************************
TASK [Set install initialization 'In Progress'] ********************************
ok: [node01]
PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Evaluate oo_nodes_to_bootstrap] ******************************************
ok: [localhost] => (item=node02)
TASK [Add masters to oo_nodes_to_bootstrap] ************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] ********************************************* ok: [localhost] => (item=node01) [WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config PLAY [Ensure that all non-node hosts are accessible] *************************** TASK [Gathering Facts] ********************************************************* ok: [node01] PLAY [Initialize basic host facts] ********************************************* TASK [Gathering Facts] ********************************************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02 TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] *** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : debug] ************************************ skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_stats] ******************************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] *** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02 included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02 TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : conditional_set_fact] ********************* ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Standardize on latest variable names] ***** ok: [node01] ok: [node02] TASK [openshift_sanitize_inventory : Normalize openshift_release] ************** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : include_tasks] **************************** included: /root/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02 TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : set_fact] ********************************* skipping: [node01] => (item=openshift_hosted_etcd_storage_kind) skipping: [node02] => (item=openshift_hosted_etcd_storage_kind) TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK 
[openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : ensure provider configuration variables are defined] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure removed web console extension variables are not set] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : Ensure that web console port matches API server port] *** skipping: [node01] skipping: [node02] TASK [openshift_sanitize_inventory : At least one master is schedulable] ******* skipping: [node01] skipping: [node02] TASK [Detecting Operating System from ostree_booted] *************************** ok: [node02] ok: [node01] TASK [set openshift_deployment_type if unset] ********************************** skipping: [node01] skipping: [node02] TASK [check for node already bootstrapped] ************************************* ok: [node02] ok: [node01] TASK [initialize_facts set fact openshift_is_bootstrapped] ********************* ok: [node01] ok: [node02] TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] *** ok: [node01] ok: [node02] TASK [Determine Atomic Host Docker Version] ************************************ skipping: [node01] skipping: [node02] TASK [assert atomic host docker version is 1.12 or later] ********************** skipping: [node01] skipping: [node02] PLAY [Retrieve existing master configs and validate] *************************** TASK [openshift_control_plane : stat] ****************************************** ok: [node01] TASK [openshift_control_plane : slurp] ***************************************** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [openshift_control_plane : Check for file paths outside of /etc/origin/master in master's config] *** ok: [node01] TASK [openshift_control_plane : set_fact] ************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Initialize special first-master variables] ******************************* TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Disable web console if required] ***************************************** TASK [set_fact] **************************************************************** skipping: [node01] PLAY [Setup yum repositories for all hosts] ************************************ TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Install Red Hat Subscription manager] ******************* skipping: [node02] TASK [rhel_subscribe : Is host already registered?] 
**************************** skipping: [node02] TASK [rhel_subscribe : Register host] ****************************************** skipping: [node02] TASK [rhel_subscribe : fail] *************************************************** skipping: [node02] TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] *********** skipping: [node02] TASK [rhel_subscribe : Attach to OpenShift Pool] ******************************* skipping: [node02] TASK [rhel_subscribe : Satellite preparation] ********************************** skipping: [node02] TASK [openshift_repos : openshift_repos detect ostree] ************************* ok: [node02] TASK [openshift_repos : Ensure libselinux-python is installed] ***************** ok: [node02] TASK [openshift_repos : Remove openshift_additional.repo file] ***************** ok: [node02] TASK [openshift_repos : Create any additional repos that are defined] ********** TASK [openshift_repos : include_tasks] ***************************************** skipping: [node02] TASK [openshift_repos : include_tasks] ***************************************** included: /root/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02 TASK [openshift_repos : Configure origin gpg keys] ***************************** ok: [node02] TASK [openshift_repos : Configure correct origin release repository] *********** ok: [node02] => (item=/root/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2) TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] *** changed: [node02] => { "msg": "First run of openshift_repos" } TASK [openshift_repos : Record that openshift_repos already ran] *************** ok: [node02] RUNNING HANDLER [openshift_repos : refresh cache] ****************************** changed: [node02] PLAY [Install packages necessary for installer] ******************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Determine if chrony is installed] **************************************** changed: [node02] [WARNING]: Consider using the yum, dnf or zypper module rather than running rpm. If you need to use command because yum, dnf or zypper is insufficient you can add warn=False to this command task or set command_warnings=False in ansible.cfg to get rid of this message. 
TASK [Install ntp package] ***************************************************** skipping: [node02] TASK [Start and enable ntpd/chronyd] ******************************************* changed: [node02] TASK [Ensure openshift-ansible installer package deps are installed] *********** ok: [node02] => (item=iproute) ok: [node02] => (item=dbus-python) ok: [node02] => (item=PyYAML) ok: [node02] => (item=python-ipaddress) ok: [node02] => (item=libsemanage-python) ok: [node02] => (item=yum-utils) ok: [node02] => (item=python-docker) PLAY [Initialize cluster facts] ************************************************ TASK [Gathering Facts] ********************************************************* ok: [node01] ok: [node02] TASK [get openshift_current_version] ******************************************* ok: [node01] ok: [node02] TASK [set_fact openshift_portal_net if present on masters] ********************* ok: [node01] ok: [node02] TASK [Gather Cluster facts] **************************************************** changed: [node02] changed: [node01] TASK [Set fact of no_proxy_internal_hostnames] ********************************* skipping: [node01] skipping: [node02] TASK [Initialize openshift.node.sdn_mtu] *************************************** changed: [node02] ok: [node01] PLAY [Initialize etcd host variables] ****************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] TASK [set_fact] **************************************************************** ok: [node01] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role] ************************************************************ TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** ok: [node01] TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** ok: [node01] => { "msg": "openshift_pkg_version was not defined. 
Falling back to -3.10.0" } TASK [openshift_version : set_fact] ******************************************** ok: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Verify Requirements] ***************************************************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [Run variable sanity checks] ********************************************** ok: [node01] TASK [Validate openshift_node_groups and openshift_node_group_name] ************ ok: [node01] PLAY [Initialization Checkpoint End] ******************************************* TASK [Set install initialization 'Complete'] *********************************** ok: [node01] PLAY [Validate node hostnames] ************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [Query DNS for IP address of node02] ************************************** ok: [node02] TASK [Validate openshift_hostname when defined] ******************************** skipping: [node02] TASK [Validate openshift_ip exists on node when defined] *********************** skipping: [node02] PLAY [Configure os_firewall] *************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [os_firewall : Detecting Atomic Host Operating System] ******************** ok: [node02] TASK [os_firewall : Set fact r_os_firewall_is_atomic] ************************** ok: [node02] TASK [os_firewall : Fail - Firewalld is not supported on Atomic Host] ********** skipping: [node02] TASK [os_firewall : Install firewalld packages] ******************************** skipping: [node02] TASK [os_firewall : Ensure iptables services are not enabled] ****************** skipping: [node02] => (item=iptables) skipping: [node02] => (item=ip6tables) TASK [os_firewall : Wait 10 seconds after disabling iptables] ****************** skipping: [node02] TASK [os_firewall : Start and enable firewalld service] ************************ skipping: [node02] TASK 
[os_firewall : need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail] *** skipping: [node02] TASK [os_firewall : Restart polkitd] ******************************************* skipping: [node02] TASK [os_firewall : Wait for polkit action to have been created] *************** skipping: [node02] TASK [os_firewall : Ensure firewalld service is not enabled] ******************* ok: [node02] TASK [os_firewall : Wait 10 seconds after disabling firewalld] ***************** skipping: [node02] TASK [os_firewall : Install iptables packages] ********************************* ok: [node02] => (item=iptables) ok: [node02] => (item=iptables-services) TASK [os_firewall : Start and enable iptables service] ************************* ok: [node02 -> node02] => (item=node02) TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] *** skipping: [node02] PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [container_runtime : Setup the docker-storage for overlay] **************** skipping: [node02] TASK [container_runtime : Create file system on extra volume device] *********** TASK [container_runtime : Create mount entry for extra volume] ***************** PLAY [oo_nodes_to_config] ****************************************************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** ok: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* ok: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ****************** skipping: [node02] TASK [container_runtime : Add https_proxy to /etc/atomic.conf] ***************** skipping: [node02] TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ******************** skipping: [node02] TASK [container_runtime : Get current installed Docker version] **************** ok: [node02] TASK [container_runtime : Error out if Docker pre-installed but too old] ******* skipping: [node02] TASK [container_runtime : Error out if requested Docker is too old] ************ skipping: [node02] TASK [container_runtime : Install Docker] ************************************** skipping: [node02] TASK [container_runtime : Ensure 
docker.service.d directory exists] ************ ok: [node02] TASK [container_runtime : Configure Docker service unit file] ****************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Set registry params] ********************************* skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []}) skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []}) TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] *** skipping: [node02] TASK [container_runtime : Set Proxy Settings] ********************************** skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''}) skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''}) TASK [container_runtime : Set various Docker options] ************************** ok: [node02] TASK [container_runtime : stat] ************************************************ ok: [node02] TASK [container_runtime : Configure Docker Network OPTIONS] ******************** ok: [node02] TASK [container_runtime : Detect if docker is already started] ***************** ok: [node02] TASK [container_runtime : Start the Docker service] **************************** ok: [node02] TASK [container_runtime : set_fact] ******************************************** ok: [node02] TASK [container_runtime : Check for docker_storage_path/overlay2] ************** ok: [node02] TASK [container_runtime : Fixup SELinux permissions for docker] **************** changed: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* ok: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** ok: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** ok: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] TASK [container_runtime : Fail if Atomic Host since this is an rpm request] **** skipping: [node02] TASK [container_runtime : Getting current systemd-udevd exec command] ********** skipping: [node02] TASK [container_runtime : Assure systemd-udevd.service.d directory exists] ***** skipping: [node02] TASK [container_runtime : Create 
systemd-udevd override file] ****************** skipping: [node02] TASK [container_runtime : Add enterprise registry, if necessary] *************** skipping: [node02] TASK [container_runtime : Check that overlay is in the kernel] ***************** skipping: [node02] TASK [container_runtime : Add overlay to modprobe.d] *************************** skipping: [node02] TASK [container_runtime : Manually modprobe overlay into the kernel] *********** skipping: [node02] TASK [container_runtime : Enable and start systemd-modules-load] *************** skipping: [node02] TASK [container_runtime : Install cri-o] *************************************** skipping: [node02] TASK [container_runtime : Remove CRI-O default configuration files] ************ skipping: [node02] => (item=/etc/cni/net.d/200-loopback.conf) skipping: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf) TASK [container_runtime : Create the CRI-O configuration] ********************** skipping: [node02] TASK [container_runtime : Ensure CNI configuration directory exists] *********** skipping: [node02] TASK [container_runtime : Add iptables allow rules] **************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove iptables rules] ******************************* TASK [container_runtime : Add firewalld allow rules] *************************** skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'}) TASK [container_runtime : Remove firewalld allow rules] ************************ TASK [container_runtime : Configure the CNI network] *************************** skipping: [node02] TASK [container_runtime : Create /etc/sysconfig/crio-network] ****************** skipping: [node02] TASK [container_runtime : Start the CRI-O service] ***************************** skipping: [node02] TASK [container_runtime : Ensure /var/lib/containers exists] ******************* skipping: [node02] TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ****** skipping: [node02] TASK [container_runtime : Check for credentials file for registry auth] ******** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth] ***** skipping: [node02] TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] *** skipping: [node02] TASK [container_runtime : stat the docker data dir] **************************** skipping: [node02] TASK [container_runtime : stop the current running docker] ********************* skipping: [node02] TASK [container_runtime : copy "/var/lib/docker" to "/var/lib/containers/docker"] *** skipping: [node02] TASK [container_runtime : Set the selinux context on /var/lib/containers/docker] *** skipping: [node02] TASK [container_runtime : restorecon the /var/lib/containers/docker] *********** skipping: [node02] TASK [container_runtime : Remove the old docker location] ********************** skipping: [node02] TASK [container_runtime : Setup the link] ************************************** skipping: [node02] TASK [container_runtime : start docker] **************************************** skipping: [node02] PLAY [Determine openshift_version to configure on first master] **************** TASK [Gathering Facts] ********************************************************* ok: [node01] TASK [include_role] ************************************************************ TASK [openshift_version : Use openshift_current_version fact as version to configure if already installed] *** skipping: [node01] 
TASK [openshift_version : Set openshift_version to openshift_release if undefined] *** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : debug] *********************************************** skipping: [node01] TASK [openshift_version : set_fact] ******************************************** skipping: [node01] TASK [openshift_version : assert openshift_release in openshift_image_tag] ***** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : assert openshift_release in openshift_pkg_version] *** ok: [node01] => { "changed": false, "msg": "All assertions passed" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_release": "3.10" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_image_tag": "v3.10.0-rc.0" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_pkg_version": "-3.10.0*" } TASK [openshift_version : debug] *********************************************** ok: [node01] => { "openshift_version": "3.10.0" } TASK [set openshift_version booleans (first master)] *************************** ok: [node01] PLAY [Set openshift_version for etcd, node, and master hosts] ****************** TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [set_fact] **************************************************************** ok: [node02] TASK [set openshift_version booleans (masters and nodes)] ********************** ok: [node02] PLAY [Node Preparation Checkpoint Start] *************************************** TASK [Set Node preparation 'In Progress'] ************************************** ok: [node01] PLAY [Only target nodes that have not yet been bootstrapped] ******************* TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [add_host] **************************************************************** skipping: [localhost] => (item=node02) ok: [localhost] => (item=node01) PLAY [Disable excluders] ******************************************************* TASK [openshift_excluder : Detecting Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02 TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-docker-excluder version detected] 
************ skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-docker-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Get available excluder version] ********************* skipping: [node02] TASK [openshift_excluder : Fail when excluder package is not found] ************ skipping: [node02] TASK [openshift_excluder : Set fact excluder_version] ************************** skipping: [node02] TASK [openshift_excluder : origin-excluder version detected] ******************* skipping: [node02] TASK [openshift_excluder : Printing upgrade target version] ******************** skipping: [node02] TASK [openshift_excluder : Check the available origin-excluder version is at most of the upgrade target version] *** skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : disable docker excluder] **************************** skipping: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : disable openshift excluder] ************************* changed: [node02] PLAY [Configure nodes] ********************************************************* TASK [Gathering Facts] ********************************************************* ok: [node02] TASK [openshift_cloud_provider : Set cloud provider facts] ********************* skipping: [node02] TASK [openshift_cloud_provider : Create cloudprovider config dir] ************** skipping: [node02] TASK [openshift_cloud_provider : include the defined cloud provider files] ***** skipping: [node02] TASK [openshift_node : fail] *************************************************** skipping: [node02] TASK [openshift_node : Check for NetworkManager service] *********************** ok: [node02] TASK [openshift_node : Set fact using_network_manager] ************************* ok: [node02] TASK [openshift_node : Install dnsmasq] **************************************** ok: [node02] TASK [openshift_node : ensure origin/node directory exists] ******************** changed: [node02] => (item=/etc/origin) 
changed: [node02] => (item=/etc/origin/node) TASK [openshift_node : Install NetworkManager during node_bootstrap provisioning] *** skipping: [node02] TASK [openshift_node : Install network manager dispatch script] **************** skipping: [node02] TASK [openshift_node : Install dnsmasq configuration] ************************** ok: [node02] TASK [openshift_node : Deploy additional dnsmasq.conf] ************************* skipping: [node02] TASK [openshift_node : Enable dnsmasq] ***************************************** ok: [node02] TASK [openshift_node : Install network manager dispatch script] **************** ok: [node02] TASK [openshift_node : Add iptables allow rules] ******************************* ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) ok: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove iptables rules] ********************************** TASK [openshift_node : Add firewalld allow rules] ****************************** skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'}) skipping: [node02] => (item={u'port': u'10256/tcp', u'service': u'Kubernetes kube-proxy health check for service load balancers'}) skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'}) skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'}) skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'}) skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'}) skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'}) skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'}) TASK [openshift_node : Remove firewalld allow rules] *************************** TASK [openshift_node : Checking for journald.conf] ***************************** ok: [node02] TASK [openshift_node : Create journald persistence directories] **************** ok: [node02] TASK [openshift_node : Update journald setup] ********************************** ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'}) ok: [node02] => (item={u'var': u'Compress', u'val': True}) ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'}) ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000}) ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'}) ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'}) ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'}) ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'}) ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'}) ok: [node02] => (item={u'var': u'ForwardToSyslog', 
u'val': False}) ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False}) TASK [openshift_node : Restart journald] *************************************** skipping: [node02] TASK [openshift_node : Disable swap] ******************************************* ok: [node02] TASK [openshift_node : Install node, clients, and conntrack packages] ********** ok: [node02] => (item={u'name': u'origin-node-3.10.0*'}) ok: [node02] => (item={u'name': u'origin-clients-3.10.0*'}) ok: [node02] => (item={u'name': u'conntrack-tools'}) TASK [openshift_node : Restart cri-o] ****************************************** skipping: [node02] TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] *** changed: [node02] TASK [openshift_node : sysctl] ************************************************* ok: [node02] TASK [openshift_node : Check for credentials file for registry auth] *********** skipping: [node02] TASK [openshift_node : Create credentials for registry auth] ******************* skipping: [node02] TASK [openshift_node : Create credentials for registry auth (alternative)] ***** skipping: [node02] TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] *** skipping: [node02] TASK [openshift_node : Check that node image is present] *********************** changed: [node02] TASK [openshift_node : Pre-pull node image] ************************************ skipping: [node02] TASK [openshift_node : Copy node script to the node] *************************** ok: [node02] TASK [openshift_node : Install Node service file] ****************************** ok: [node02] TASK [openshift_node : Ensure old system path is set] ************************** skipping: [node02] => (item=/etc/origin/openvswitch) skipping: [node02] => (item=/var/lib/kubelet) skipping: [node02] => (item=/opt/cni/bin) TASK [openshift_node : Check status of node image pre-pull] ******************** skipping: [node02] TASK [openshift_node : Copy node container image to ostree storage] ************ skipping: [node02] TASK [openshift_node : Install or Update node system container] **************** skipping: [node02] TASK [openshift_node : Restart network manager to ensure networking configuration is in place] *** skipping: [node02] TASK [openshift_node : Configure Node settings] ******************************** ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS='}) ok: [node02] => (item={u'regex': u'^DEBUG_LOGLEVEL=', u'line': u'DEBUG_LOGLEVEL=2'}) ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.10.0-rc.0'}) TASK [openshift_node : Configure Proxy Settings] ******************************* skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='}) skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='}) skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'}) TASK [openshift_node : file] *************************************************** skipping: [node02] TASK [openshift_node : Create the Node config] ********************************* changed: [node02] TASK [openshift_node : Configure Node Environment Variables] ******************* TASK [openshift_node : Ensure the node static pod directory exists] ************ changed: [node02] TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : Check status of node image pre-pull] 
******************** skipping: [node02] TASK [openshift_node : Install NFS storage plugin dependencies] **************** ok: [node02] TASK [openshift_node : Check for existence of nfs sebooleans] ****************** ok: [node02] => (item=virt_use_nfs) ok: [node02] => (item=virt_sandbox_use_nfs) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-03 03:00:18.163709', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.010036', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-03 03:00:18.153673', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-03 03:00:19.457926', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.009853', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-03 03:00:19.448073', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-03 03:00:18.163709', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.010036', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-03 03:00:18.153673', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-03 03:00:19.457926', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.009853', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-07-03 03:00:19.448073', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install GlusterFS storage plugin dependencies] ********** ok: [node02] TASK [openshift_node : Check for existence of fusefs sebooleans] *************** 
ok: [node02] => (item=virt_use_fusefs) ok: [node02] => (item=virt_sandbox_use_fusefs) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] *** ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-03 03:00:25.876265', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.009599', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-03 03:00:25.866666', '_ansible_ignore_errors': None, 'failed': False}) ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-03 03:00:27.111656', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.005862', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-03 03:00:27.105794', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] *** skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-03 03:00:25.876265', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.009599', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-03 03:00:25.866666', '_ansible_ignore_errors': None, 'failed': False}) skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-07-03 03:00:27.111656', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.005862', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-07-03 03:00:27.105794', '_ansible_ignore_errors': None, 'failed': False}) TASK [openshift_node : Install Ceph storage plugin dependencies] *************** ok: [node02] TASK [openshift_node : Install iSCSI storage plugin dependencies] ************** ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=device-mapper-multipath) TASK [openshift_node : 
restart services] *************************************** ok: [node02] => (item=multipathd) ok: [node02] => (item=rpcbind) ok: [node02] => (item=iscsid) TASK [openshift_node : Template multipath configuration] *********************** changed: [node02] TASK [openshift_node : Enable and start multipath] ***************************** changed: [node02] TASK [tuned : Check for tuned package] ***************************************** ok: [node02] TASK [tuned : Set tuned OpenShift variables] *********************************** ok: [node02] TASK [tuned : Ensure directory structure exists] ******************************* ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : 
Ensure files are populated from templates] *********************** skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) skipping: [node02] => (item={'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'state': 'directory', 'ctime': 1529614575.1009853, 'serole': 'object_r', 'gid': 0, 'mode': '0755', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift', 'size': 24, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/recommend.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'recommend.conf', 'size': 290, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-control-plane/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-control-plane/tuned.conf', 'size': 744, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift-node/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift-node/tuned.conf', 'size': 135, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) ok: [node02] => (item={'src': u'/root/openshift-ansible/roles/tuned/templates/openshift/tuned.conf', 'group': u'root', 'uid': 0, 'selevel': 's0', 'seuser': 'unconfined_u', 'serole': 'object_r', 'ctime': 1529614575.1009853, 'state': 'file', 'gid': 0, 'mode': '0644', 'mtime': 1529614575.1009853, 'owner': 'root', 'path': u'openshift/tuned.conf', 'size': 594, 'root': u'/root/openshift-ansible/roles/tuned/templates', 'setype': 'admin_home_t'}) TASK [tuned : Make tuned use the recommended tuned profile on restart] ********* changed: [node02] => (item=/etc/tuned/active_profile) changed: [node02] => (item=/etc/tuned/profile_mode) TASK [tuned : Restart tuned service] ******************************************* changed: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] ******* ok: [node02] TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] *** PLAY [node bootstrap config] *************************************************** TASK [Gathering Facts] 
********************************************************* ok: [node02] TASK [openshift_node : install needed rpm(s)] ********************************** ok: [node02] => (item=origin-node) ok: [node02] => (item=origin-docker-excluder) ok: [node02] => (item=ansible) ok: [node02] => (item=bash-completion) ok: [node02] => (item=docker) ok: [node02] => (item=haproxy) ok: [node02] => (item=dnsmasq) ok: [node02] => (item=ntp) ok: [node02] => (item=logrotate) ok: [node02] => (item=httpd-tools) ok: [node02] => (item=bind-utils) ok: [node02] => (item=firewalld) ok: [node02] => (item=libselinux-python) ok: [node02] => (item=conntrack-tools) ok: [node02] => (item=openssl) ok: [node02] => (item=iproute) ok: [node02] => (item=python-dbus) ok: [node02] => (item=PyYAML) ok: [node02] => (item=yum-utils) ok: [node02] => (item=glusterfs-fuse) ok: [node02] => (item=device-mapper-multipath) ok: [node02] => (item=nfs-utils) ok: [node02] => (item=cockpit-ws) ok: [node02] => (item=cockpit-system) ok: [node02] => (item=cockpit-bridge) ok: [node02] => (item=cockpit-docker) ok: [node02] => (item=iscsi-initiator-utils) ok: [node02] => (item=ceph-common) TASK [openshift_node : create the directory for node] ************************** skipping: [node02] TASK [openshift_node : laydown systemd override] ******************************* skipping: [node02] TASK [openshift_node : update the sysconfig to have necessary variables] ******* ok: [node02] => (item={u'regexp': u'^KUBECONFIG=.*', u'line': u'KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig'}) TASK [openshift_node : Configure AWS Cloud Provider Settings] ****************** skipping: [node02] => (item=None) skipping: [node02] => (item=None) skipping: [node02] TASK [openshift_node : disable origin-node service] **************************** changed: [node02] => (item=origin-node.service) TASK [openshift_node : Check for RPM generated config marker file .config_managed] *** ok: [node02] TASK [openshift_node : create directories for bootstrapping] ******************* ok: [node02] => (item=/root/openshift_bootstrap) changed: [node02] => (item=/var/lib/origin/openshift.local.config) changed: [node02] => (item=/var/lib/origin/openshift.local.config/node) ok: [node02] => (item=/etc/docker/certs.d/docker-registry.default.svc:5000) TASK [openshift_node : laydown the bootstrap.yml file for on boot configuration] *** ok: [node02] TASK [openshift_node : Create a symlink to the node client CA for the docker registry] *** ok: [node02] TASK [openshift_node : Remove RPM generated config files if present] *********** skipping: [node02] => (item=master) skipping: [node02] => (item=.config_managed) TASK [openshift_node : find all files in /etc/origin/node so we can remove them] *** skipping: [node02] TASK [openshift_node : Remove everything except the resolv.conf required for node] *** skipping: [node02] TASK [openshift_node_group : create node config template] ********************** changed: [node02] TASK [openshift_node_group : remove existing node config] ********************** changed: [node02] TASK [openshift_node_group : Ensure required directories are present] ********** ok: [node02] => (item=/etc/origin/node/pods) changed: [node02] => (item=/etc/origin/node/certificates) TASK [openshift_node_group : Update the sysconfig to group "node-config-compute"] *** changed: [node02] TASK [set_fact] **************************************************************** ok: [node02] PLAY [Re-enable excluder if it was previously enabled] ************************* TASK [openshift_excluder : Detecting 
Atomic Host Operating System] ************* ok: [node02] TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_docker_excluder": true } TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] *** ok: [node02] => { "r_openshift_excluder_enable_openshift_excluder": true } TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] *** skipping: [node02] TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] *** skipping: [node02] TASK [openshift_excluder : Include main action task file] ********************** included: /root/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02 TASK [openshift_excluder : Install docker excluder - yum] ********************** skipping: [node02] TASK [openshift_excluder : Install docker excluder - dnf] ********************** skipping: [node02] TASK [openshift_excluder : Install openshift excluder - yum] ******************* skipping: [node02] TASK [openshift_excluder : Install openshift excluder - dnf] ******************* skipping: [node02] TASK [openshift_excluder : set_fact] ******************************************* skipping: [node02] TASK [openshift_excluder : Check for docker-excluder] ************************** ok: [node02] TASK [openshift_excluder : Enable docker excluder] ***************************** changed: [node02] TASK [openshift_excluder : Check for openshift excluder] *********************** ok: [node02] TASK [openshift_excluder : Enable openshift excluder] ************************** changed: [node02] PLAY [Node Preparation Checkpoint End] ***************************************** TASK [Set Node preparation 'Complete'] ***************************************** ok: [node01] PLAY [Distribute bootstrap and start nodes] ************************************ TASK [openshift_node : Gather node information] ******************************** changed: [node02] ok: [node01] TASK [openshift_node : Copy master bootstrap config locally] ******************* ok: [node02] TASK [openshift_node : Distribute bootstrap kubeconfig if one does not exist] *** ok: [node01] changed: [node02] TASK [openshift_node : Start and enable node for bootstrapping] **************** changed: [node01] changed: [node02] TASK [openshift_node : Get node logs] ****************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : debug] ************************************************** skipping: [node02] skipping: [node01] TASK [openshift_node : fail] *************************************************** skipping: [node02] skipping: [node01] PLAY [Approve any pending CSR requests from inventory nodes] ******************* TASK [Dump all candidate bootstrap hostnames] ********************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Find all hostnames for bootstrapping] ************************************ ok: [node01] TASK [Dump the bootstrap hostnames] ******************************************** ok: [node01] => { "msg": [ "node02", "node01" ] } TASK [Approve bootstrap nodes] ************************************************* changed: [node01] TASK [Get CSRs] **************************************************************** skipping: [node01] TASK [Report approval errors] ************************************************** skipping: [node01] PLAY [Ensure any inventory labels are applied to the nodes] ******************** TASK [Gathering Facts] 
********************************************************* ok: [node02] ok: [node01] TASK [openshift_manage_node : Wait for master API to become available before proceeding] *** skipping: [node02] TASK [openshift_manage_node : Wait for Node Registration] ********************** ok: [node02 -> node01] ok: [node01 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02, node01 TASK [openshift_manage_node : Set node schedulability] ************************* ok: [node02 -> node01] ok: [node01 -> node01] TASK [openshift_manage_node : include_tasks] *********************************** included: /root/openshift-ansible/roles/openshift_manage_node/tasks/set_default_node_role.yml for node02, node01 TASK [openshift_manage_node : Retrieve nodes that are marked with the infra selector or the legacy infra selector] *** ok: [node02 -> node01] TASK [openshift_manage_node : Label infra or legacy infra nodes with the new role label] *** TASK [openshift_manage_node : Retrieve non-infra, non-master nodes that are not yet labeled compute] *** ok: [node02 -> node01] TASK [openshift_manage_node : label non-master non-infra nodes compute] ******** TASK [openshift_manage_node : Label all-in-one master as a compute node] ******* skipping: [node02] PLAY RECAP ********************************************************************* localhost : ok=30 changed=0 unreachable=0 failed=0 node01 : ok=71 changed=3 unreachable=0 failed=0 node02 : ok=155 changed=33 unreachable=0 failed=0 INSTALLER STATUS *************************************************************** Initialization : Complete (0:02:53) Node Preparation : Complete (0:04:23) Sending file modes: C0755 110489328 oc Sending file modes: C0600 5645 admin.kubeconfig Cluster "node01:8443" set. Cluster "node01:8443" set. + set +e + kubectl get nodes --no-headers + cluster/kubectl.sh get nodes --no-headers node01 Ready compute,infra,master 11d v1.10.0+b81c8f8 node02 Ready compute 56s v1.10.0+b81c8f8 + kubectl_rc=0 + '[' 0 -ne 0 ']' ++ kubectl get nodes --no-headers ++ grep NotReady ++ cluster/kubectl.sh get nodes --no-headers + '[' -n '' ']' + set -e + echo 'Nodes are ready:' Nodes are ready: + kubectl get nodes + cluster/kubectl.sh get nodes NAME STATUS ROLES AGE VERSION node01 Ready compute,infra,master 11d v1.10.0+b81c8f8 node02 Ready compute 57s v1.10.0+b81c8f8 + make cluster-sync ./cluster/build.sh Building ... 
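The readiness gate just traced reduces to listing the nodes and failing if any of them still report NotReady; a condensed sketch of that pattern, assuming cluster/kubectl.sh wraps kubectl with the provider kubeconfig (exact error handling in the real script may differ):

# Fail fast if the node listing itself fails.
if ! nodes=$(cluster/kubectl.sh get nodes --no-headers); then
    echo "could not list nodes"
    exit 1
fi
# Any node still reporting NotReady fails the gate.
if echo "$nodes" | grep -q NotReady; then
    echo "Nodes are not ready:"
    echo "$nodes"
    exit 1
fi
echo 'Nodes are ready:'
cluster/kubectl.sh get nodes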
Untagged: localhost:32792/kubevirt/virt-controller:devel Untagged: localhost:32792/kubevirt/virt-controller@sha256:fcf23163497d8dc2cd5e7e13587bb207a4c4f890d8edfd6207ab04071f6e9032 Deleted: sha256:78edc65214941f33cbfe020efecb66bd6f1cece3b6930f07e5498634aeefcb58 Deleted: sha256:26189768ff86e3de2033c72a9f9d7ea32eff38c0ef9df279007c9c41769398b3 Deleted: sha256:a848356f7342d419b1ee630f79655008282c59e73b151649df53b568cfae226a Deleted: sha256:657379cfe62acf0139025ade9289c84a61c4cb8de09452f2a8abc3e80d2f3522 Untagged: localhost:32792/kubevirt/virt-launcher:devel Untagged: localhost:32792/kubevirt/virt-launcher@sha256:57fa010eef646237d7d9a524843b3fc7df4d65b0e60f0ad11df866117d7658eb Deleted: sha256:53ccb0ca39bd4eb415dabd9a560757b9ccf30ae1bce239f9067a6fb04ac9bcab Deleted: sha256:1c2ee685f4b5e2ceb7e4c0418db9d67665b8cc31f2b718c3c00fd3f90641e356 Deleted: sha256:f92cbb24f752889505d78f819ddb0272d26b725896b3d2d629789e72e1eece0d Deleted: sha256:7dfaf848c459342dc71c8a4e37569843a3d9ddb214a9b761d92a478b8db249b7 Deleted: sha256:a58506a3a9972ef0aae63fa3a442f8f4e54b6ef394bfc2ec2e8bc5812f9dc118 Deleted: sha256:4c8ccf95d8fcf9ca330f5993230d976750e3c003b6d02dad4935e75484b65679 Deleted: sha256:35de9b918e4a4018e61822d200b8ca9b66605d6f5580cdc003197cab7f693182 Deleted: sha256:1420e18baa4ca186cf1fd1acea97fc59b2fe7655749e40564aa1474908806dca Deleted: sha256:72980e418f32d16f43706e80282e359496f800462d12019c3eb906302ebf43af Deleted: sha256:c242658b4c0055224596a7d628de19ba11b1289c72dabc37afc69b0bdda30cba Deleted: sha256:e7ee5e217c9c615794681168310ec1a0a222cac861eefacc9aba38d263af8f46 Deleted: sha256:2a744364c5753f1f9405dc786e3acb7d06b23e61dd622d16a9c96a380aeb36e7 Untagged: localhost:32792/kubevirt/virt-handler:devel Untagged: localhost:32792/kubevirt/virt-handler@sha256:a68764861611cb57fab624ab6f08e32af19f22388b9d88e1c06123a41f89901c Deleted: sha256:cc10a7066de180a2bc9d173bc2421e56636c512a08ed546a0fdd33d7f3a4843d Deleted: sha256:8ea3071615736b1b09f9609cb2937555fbc757e672bef2f82475628984154791 Deleted: sha256:6c6d38fb64e3a26ccef86378b40f7cdbfc5ebf5e4f6e149f429449acc6391843 Deleted: sha256:630680d2a1d88bd9cd081373df03045b164fe808cba2b0399299851e7ac7443e Untagged: localhost:32792/kubevirt/virt-api:devel Untagged: localhost:32792/kubevirt/virt-api@sha256:55d8d481b989663d22e2cef8a21d412cff3853da80d7f26d724e8d718d11b318 Deleted: sha256:149f4660e123a7c7216dd61d6a730dbc3599a22ca46e6949b7c50fc919004bf3 Deleted: sha256:02447c6e56eee0b4f15fbace75a5512838c55006e2aaf0f920c011f9ac8e5250 Deleted: sha256:f3ebf033619980b3eca80f38089ea66b6709eea1edfdb2fe68d603affa51fb20 Deleted: sha256:4f4736c19a1fb8d12340438b54da33aac5de445f3076184fce074c499c88f2d9 Untagged: localhost:32792/kubevirt/subresource-access-test:devel Untagged: localhost:32792/kubevirt/subresource-access-test@sha256:0ee7e20c3561253bae7ce68f66bd5f0a80430566858d4707b544bebac222c1f1 Deleted: sha256:040df53f7f7bb1dee24904f23ff963868eaf1b78b32b96f29c9df70b5b34e7af Deleted: sha256:b92998d435ffab9e144a80120f470b7bbde6da0057c3a0027f09ccfac11d4b8d Deleted: sha256:5b72ab38c0e2ff5ff7f10cf75b453d7a5db0f96096be614cccd2e276edd3df52 Deleted: sha256:b7ac4ad5126d68fda0bc3132f33ca87c300957070c887c09e8e6981eba6eddac sha256:0e52976342d8bb3bff850b704549f06d30e55824f50309732b55e928864b05c5 go version go1.10 linux/amd64 go version go1.10 linux/amd64 make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh 
sha256:0e52976342d8bb3bff850b704549f06d30e55824f50309732b55e928864b05c5 go version go1.10 linux/amd64 go version go1.10 linux/amd64 find: '/root/go/src/kubevirt.io/kubevirt/_out/cmd': No such file or directory Compiling tests... compiled tests.test hack/build-docker.sh build Sending build context to Docker daemon 38.38 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> 65d6d48cdb35 Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> e1ade8663337 Step 5/8 : USER 1001 ---> Using cache ---> 2ce44d6f372a Step 6/8 : COPY virt-controller /usr/bin/virt-controller ---> 817882afba91 Removing intermediate container b9e4dc1803fc Step 7/8 : ENTRYPOINT /usr/bin/virt-controller ---> Running in 84adc49b8944 ---> 0158df016f0f Removing intermediate container 84adc49b8944 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-controller" '' ---> Running in 9a26ae2c5040 ---> d885b4642f45 Removing intermediate container 9a26ae2c5040 Successfully built d885b4642f45 Sending build context to Docker daemon 39.09 MB Step 1/10 : FROM kubevirt/libvirt:3.7.0 ---> 60c80c8f7523 Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> d4ddb23dff45 Step 3/10 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool net-tools sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> 5563e87cb74c Step 4/10 : COPY virt-launcher /usr/bin/virt-launcher ---> 6b473bd0a973 Removing intermediate container 827b63bc2b9f Step 5/10 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> 0d0c8cf4bd96 Removing intermediate container 0822f1f5e732 Step 6/10 : RUN setcap CAP_NET_BIND_SERVICE=+eip /usr/bin/qemu-system-x86_64 ---> Running in 1eca3db43660  ---> 2bedaffd5e3b Removing intermediate container 1eca3db43660 Step 7/10 : RUN mkdir -p /usr/share/kubevirt/virt-launcher ---> Running in 3fdf6ea51031  ---> 5563fff264a9 Removing intermediate container 3fdf6ea51031 Step 8/10 : COPY entrypoint.sh libvirtd.sh sh.sh sock-connector /usr/share/kubevirt/virt-launcher/ ---> 989ac7f0a309 Removing intermediate container 81590ba21901 Step 9/10 : ENTRYPOINT /usr/share/kubevirt/virt-launcher/entrypoint.sh ---> Running in 037a2ad8c68c ---> 4c06e66f1b12 Removing intermediate container 037a2ad8c68c Step 10/10 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-launcher" '' ---> Running in 33c66d438f6f ---> 269aa689dbfc Removing intermediate container 33c66d438f6f Successfully built 269aa689dbfc Sending build context to Docker daemon 40.64 MB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4 Step 3/5 : COPY virt-handler /usr/bin/virt-handler ---> 182d6aaf62f3 Removing intermediate container 3fde116445ad Step 4/5 : ENTRYPOINT /usr/bin/virt-handler ---> Running in 896a7745d24f ---> b4b19f5bee1d Removing intermediate container 896a7745d24f Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-handler" '' ---> Running in 27735d923524 ---> b551ef11329c Removing intermediate container 27735d923524 Successfully built b551ef11329c Sending build context to Docker daemon 38.12 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api 
---> Using cache ---> 2eeb55f39191 Step 4/8 : WORKDIR /home/virt-api ---> Using cache ---> 56cea32a45d4 Step 5/8 : USER 1001 ---> Using cache ---> d121920c238b Step 6/8 : COPY virt-api /usr/bin/virt-api ---> 8cf331443cfe Removing intermediate container e2425f06eb34 Step 7/8 : ENTRYPOINT /usr/bin/virt-api ---> Running in 510073a88b4f ---> d1c6a55c5bb7 Removing intermediate container 510073a88b4f Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "virt-api" '' ---> Running in 0b70aa1678aa ---> c756748809d9 Removing intermediate container 0b70aa1678aa Successfully built c756748809d9 Sending build context to Docker daemon 4.608 kB Step 1/7 : FROM fedora:27 ---> 9110ae7f579f Step 2/7 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4 Step 3/7 : ENV container docker ---> Using cache ---> 32cab959eac8 Step 4/7 : RUN mkdir -p /images/custom /images/alpine /images/datavolume1 /images/datavolume2 /images/datavolume3 && truncate -s 64M /images/custom/disk.img && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/alpine/disk.img ---> Using cache ---> 2af3c219fa31 Step 5/7 : ADD entrypoint.sh / ---> Using cache ---> d75d502b509c Step 6/7 : CMD /entrypoint.sh ---> Using cache ---> 6342a68a22f7 Step 7/7 : LABEL "disks-images-provider" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Using cache ---> 5a3d6f47173e Successfully built 5a3d6f47173e Sending build context to Docker daemon 2.56 kB Step 1/5 : FROM fedora:27 ---> 9110ae7f579f Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4 Step 3/5 : ENV container docker ---> Using cache ---> 32cab959eac8 Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> 391fa00b27f9 Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "vm-killer" '' ---> Using cache ---> 87ddb36833db Successfully built 87ddb36833db Sending build context to Docker daemon 5.12 kB Step 1/7 : FROM debian:sid ---> bcec0ae8107e Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> 6696837acee7 Step 3/7 : ENV container docker ---> Using cache ---> 2dd2b1a02be6 Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> dd3c4950b5c8 Step 5/7 : ADD entry-point.sh / ---> Using cache ---> d221e0eb5770 Step 6/7 : CMD /entry-point.sh ---> Using cache ---> 6506e61a9f41 Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "registry-disk-v1alpha" '' ---> Using cache ---> 1e0eccc83e91 Successfully built 1e0eccc83e91 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32975/kubevirt/registry-disk-v1alpha:devel ---> 1e0eccc83e91 Step 2/4 : MAINTAINER "David Vossel" \ ---> Using cache ---> 1d52486221a6 Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Using cache ---> 03bc90bbb22f Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Using cache ---> a6b4225c5dc6 Successfully built a6b4225c5dc6 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32975/kubevirt/registry-disk-v1alpha:devel ---> 1e0eccc83e91 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> d9d76f0d57f8 Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > 
/disk/fedora.qcow2 ---> Using cache ---> d2397c1e8bca Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Using cache ---> 9e4cd80f1ac6 Successfully built 9e4cd80f1ac6 Sending build context to Docker daemon 2.56 kB Step 1/4 : FROM localhost:32975/kubevirt/registry-disk-v1alpha:devel ---> 1e0eccc83e91 Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> d9d76f0d57f8 Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso ---> Using cache ---> 7355b1cb93e8 Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Using cache ---> 7871cedc4aba Successfully built 7871cedc4aba Sending build context to Docker daemon 34.91 MB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4 Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl ---> Using cache ---> 6e6e1b7931e0 Step 4/8 : WORKDIR /home/virtctl ---> Using cache ---> 9d27e69a25f2 Step 5/8 : USER 1001 ---> Using cache ---> 1760a8e197af Step 6/8 : COPY subresource-access-test /subresource-access-test ---> b939dfc9b0be Removing intermediate container e99e4fa88d97 Step 7/8 : ENTRYPOINT /subresource-access-test ---> Running in 20544d9b62b9 ---> 8ec1b8785b78 Removing intermediate container 20544d9b62b9 Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "subresource-access-test" '' ---> Running in 61fe16771a2b ---> d295e69ec618 Removing intermediate container 61fe16771a2b Successfully built d295e69ec618 Sending build context to Docker daemon 3.072 kB Step 1/9 : FROM fedora:27 ---> 9110ae7f579f Step 2/9 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4 Step 3/9 : ENV container docker ---> Using cache ---> 32cab959eac8 Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all ---> Using cache ---> 8e034c77f534 Step 5/9 : ENV GIMME_GO_VERSION 1.9.2 ---> Using cache ---> 28ec1d482013 Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> db78d0286f58 Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 7ebe54e98be4 Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli ---> Using cache ---> a3b04c1816f5 Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.10-release0" '' "winrmcli" '' ---> Using cache ---> d275090998d5 Successfully built d275090998d5 Sending build context to Docker daemon 4.096 kB Step 1/8 : FROM fedora:27 ---> 9110ae7f579f Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4 Step 3/8 : ENV container docker ---> Using cache ---> 32cab959eac8 Step 4/8 : RUN dnf install -y nginx && dnf -y clean all ---> Using cache ---> 1458ab8a4ff8 Step 5/8 : RUN mkdir -p /usr/share/nginx/html/images && curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /usr/share/nginx/html/images/alpine.iso && rm -f /etc/nginx/nginx.conf ---> Using cache ---> 6b96398c76f7 Step 6/8 : ADD nginx.conf /etc/nginx/ ---> Using cache ---> 19b31a644a31 Step 7/8 : EXPOSE 80 ---> Using cache ---> 11692d1157a3 Step 8/8 : LABEL "cdi-http-import-server" '' "kubevirt-functional-tests-openshift-3.10-release0" '' ---> Using cache ---> 55416e8738de Successfully built 55416e8738de hack/build-docker.sh push The push 
refers to a repository [localhost:32975/kubevirt/virt-controller] 2d411648ffb5: Preparing 52069b1f5033: Preparing 39bae602f753: Preparing 52069b1f5033: Pushed 2d411648ffb5: Pushed 39bae602f753: Pushed devel: digest: sha256:0c719bb3c555efc724f52c5a4a48aa6ed184fe34447dcd57b2badb4bbc590536 size: 949 The push refers to a repository [localhost:32975/kubevirt/virt-launcher] 8afcf30feef6: Preparing f540870f7f64: Preparing f7c0d5be4cb9: Preparing 393b15618b97: Preparing f3268140b0a2: Preparing 05dda64adde7: Preparing 530cc55618cd: Preparing 34fa414dfdf6: Preparing a1359dc556dd: Preparing 490c7c373332: Preparing 4b440db36f72: Preparing 39bae602f753: Preparing 05dda64adde7: Waiting 490c7c373332: Waiting a1359dc556dd: Waiting 530cc55618cd: Waiting 39bae602f753: Waiting 34fa414dfdf6: Waiting 8afcf30feef6: Pushed 393b15618b97: Pushed f540870f7f64: Pushed 34fa414dfdf6: Pushed 530cc55618cd: Pushed 490c7c373332: Pushed a1359dc556dd: Pushed 39bae602f753: Mounted from kubevirt/virt-controller f7c0d5be4cb9: Pushed 05dda64adde7: Pushed f3268140b0a2: Pushed 4b440db36f72: Pushed devel: digest: sha256:f45da4147aa600c673e20a447d2c0a932a5666713cee9e77be5f9bb501587d25 size: 2828 The push refers to a repository [localhost:32975/kubevirt/virt-handler] dd3c0112b01a: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-launcher dd3c0112b01a: Pushed devel: digest: sha256:7763fea0ee7150f4568dc9e4c9608efd22915798e0dd5481c97bfe9ea59d0f57 size: 741 The push refers to a repository [localhost:32975/kubevirt/virt-api] a8718a3fa3dc: Preparing 86b4b25303b4: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-handler 86b4b25303b4: Pushed a8718a3fa3dc: Pushed devel: digest: sha256:a959d35bec8338921e4cc8d12c8f6c6cf7db79d78964c2fe7258c48e3d7b2f42 size: 948 The push refers to a repository [localhost:32975/kubevirt/disks-images-provider] 4e6a541add8b: Preparing 0a565e931d7e: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-api 4e6a541add8b: Pushed 0a565e931d7e: Pushed devel: digest: sha256:9ec9f431e92f296c5c245ebeb5c5be17349b0894173f2f378fd5845dd1ced1de size: 948 The push refers to a repository [localhost:32975/kubevirt/vm-killer] 040d3361950b: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/disks-images-provider 040d3361950b: Pushed devel: digest: sha256:3370725daf8749cb01396570ec9eed8bbc5f22c6170a6e936d082c2474ad384b size: 740 The push refers to a repository [localhost:32975/kubevirt/registry-disk-v1alpha] 4cd98e29acca: Preparing 9beeb9a18439: Preparing 6709b2da72b8: Preparing 4cd98e29acca: Pushed 9beeb9a18439: Pushed 6709b2da72b8: Pushed devel: digest: sha256:d9d28efb8930b485a70de8379fab7e3f58dfd6fff01f8de492c1a5f927060a4d size: 948 The push refers to a repository [localhost:32975/kubevirt/cirros-registry-disk-demo] 6df2b29ab784: Preparing 4cd98e29acca: Preparing 9beeb9a18439: Preparing 6709b2da72b8: Preparing 9beeb9a18439: Mounted from kubevirt/registry-disk-v1alpha 4cd98e29acca: Mounted from kubevirt/registry-disk-v1alpha 6709b2da72b8: Mounted from kubevirt/registry-disk-v1alpha 6df2b29ab784: Pushed devel: digest: sha256:82fa4c2896239aad5bd12bb4c7706a48bc337a6ccc8d11445561ef1eda175c51 size: 1160 The push refers to a repository [localhost:32975/kubevirt/fedora-cloud-registry-disk-demo] 4d5a709c879a: Preparing 4cd98e29acca: Preparing 9beeb9a18439: Preparing 6709b2da72b8: Preparing 6709b2da72b8: Mounted from kubevirt/cirros-registry-disk-demo 4cd98e29acca: Mounted from kubevirt/cirros-registry-disk-demo 9beeb9a18439: Mounted from 
kubevirt/cirros-registry-disk-demo 4d5a709c879a: Pushed devel: digest: sha256:7e8bdf8b4168e3b3a1b3171da4f1ce8ea895be911395381388504555ccf034d6 size: 1161 The push refers to a repository [localhost:32975/kubevirt/alpine-registry-disk-demo] f1f1b92ff450: Preparing 4cd98e29acca: Preparing 9beeb9a18439: Preparing 6709b2da72b8: Preparing 4cd98e29acca: Mounted from kubevirt/fedora-cloud-registry-disk-demo 9beeb9a18439: Mounted from kubevirt/fedora-cloud-registry-disk-demo 6709b2da72b8: Mounted from kubevirt/fedora-cloud-registry-disk-demo f1f1b92ff450: Pushed devel: digest: sha256:e275c452f71f86319f7e863ad309b20259d32efdf97897bb5c08dd9af178a892 size: 1160 The push refers to a repository [localhost:32975/kubevirt/subresource-access-test] 8e8aec9cf16f: Preparing 2c4f6b64d5e3: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/vm-killer 2c4f6b64d5e3: Pushed 8e8aec9cf16f: Pushed devel: digest: sha256:790fe097bc1b38e8d872fd18d017bbcba546b500cee4748c9dc72e07ebe1ca91 size: 948 The push refers to a repository [localhost:32975/kubevirt/winrmcli] 161ef5381259: Preparing 2bef46eb5bf3: Preparing ac5611d25ed9: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/subresource-access-test 161ef5381259: Pushed ac5611d25ed9: Pushed 2bef46eb5bf3: Pushed devel: digest: sha256:d278cc6106ef5e93bf7e061c5a5179bf620be45f33f61a967d7dc9ad2d823eff size: 1165 The push refers to a repository [localhost:32975/kubevirt/cdi-http-import-server] d9c67802d16b: Preparing ddf2d82beace: Preparing 5286ef9d417e: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/winrmcli d9c67802d16b: Pushed 5286ef9d417e: Pushed ddf2d82beace: Pushed devel: digest: sha256:1c35365f408c42422db38b72b94cc289bfac479e6fb0a891228b6eb242957c78 size: 1160 make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt' Done ./cluster/clean.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release0 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release0 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d 
/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-alpha.5-38-gcfbd00f ++ KUBEVIRT_VERSION=v0.7.0-alpha.5-38-gcfbd00f + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:0977f1c716862710c8324798b95802e11a149f4532e33be20dd70877fe8f5332 ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli images/cdi-http-import-server' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32975/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Cleaning up ...' Cleaning up ... 
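The clean-up trace that follows is essentially a label-driven sweep over the default and kube-system namespaces; a condensed sketch of that pattern (the real cluster/clean.sh covers more resource kinds plus a second pass for the cdi.kubevirt.io label, and the _kubectl wrapper below is inferred from how the trace expands it):

# kubectl wrapper pinned to the provider kubeconfig, as expanded in the trace below.
_kubectl() {
    export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
    cluster/os-3.10.0/.kubectl "$@"
}

namespaces=(default kube-system)
for ns in "${namespaces[@]}"; do
    # Delete every KubeVirt-labelled object of each kind in the namespace.
    for kind in configmaps apiservices deployment rs services secrets pv pvc ds customresourcedefinitions pods; do
        _kubectl -n "$ns" delete "$kind" -l kubevirt.io
    done
done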
+ cluster/kubectl.sh get vmis --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers + grep foregroundDeleteVirtualMachine + read p error: the server doesn't have a resource type "vmis" + _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0 No resources found + namespaces=(default ${namespace}) + for i in '${namespaces[@]}' + _kubectl -n default delete configmaps -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete configmaps -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete deployment -l kubevirt.io No resources found + _kubectl -n default delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rs -l kubevirt.io No resources found + _kubectl -n default delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete services -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n default delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete secrets -l kubevirt.io No resources found + _kubectl -n default delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pv -l kubevirt.io No resources found + _kubectl -n default delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pvc -l kubevirt.io No resources found + _kubectl -n default delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete ds -l kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n default 
delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete pods -l kubevirt.io No resources found + _kubectl -n default delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n default delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete rolebinding -l kubevirt.io No resources found + _kubectl -n default delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete roles -l kubevirt.io No resources found + _kubectl -n default delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterroles -l kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + _kubectl -n default delete deployment -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete deployment -l cdi.kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete serviceaccounts -l cdi.kubevirt.io No resources found + _kubectl -n default delete clusterroles -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterroles -l cdi.kubevirt.io No resources found + _kubectl -n default delete clusterrolebinding -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete clusterrolebinding -l cdi.kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n default delete customresourcedefinitions -l cdi.kubevirt.io No resources found + for i in '${namespaces[@]}' + _kubectl -n kube-system delete configmaps -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete configmaps -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig 
+ cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete services -l kubevirt.io No resources found + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n kube-system delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete secrets -l kubevirt.io No resources found + _kubectl -n kube-system delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pv -l kubevirt.io No resources found + _kubectl -n kube-system delete pvc -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pvc -l kubevirt.io No resources found + _kubectl -n kube-system delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete ds -l kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n kube-system delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete pods -l kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete rolebinding -l kubevirt.io No resources found + _kubectl -n kube-system delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete roles -l kubevirt.io No 
resources found + _kubectl -n kube-system delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterroles -l kubevirt.io No resources found + _kubectl -n kube-system delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io ++ wc -l ++ export KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ KUBECONFIG=cluster/os-3.10.0/.kubeconfig ++ cluster/os-3.10.0/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + _kubectl -n kube-system delete deployment -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete deployment -l cdi.kubevirt.io No resources found + _kubectl -n kube-system delete serviceaccounts -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete serviceaccounts -l cdi.kubevirt.io No resources found + _kubectl -n kube-system delete clusterroles -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterroles -l cdi.kubevirt.io No resources found + _kubectl -n kube-system delete clusterrolebinding -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete clusterrolebinding -l cdi.kubevirt.io No resources found + _kubectl -n kube-system delete customresourcedefinitions -l cdi.kubevirt.io + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl -n kube-system delete customresourcedefinitions -l cdi.kubevirt.io No resources found + sleep 2 + echo Done Done ./cluster/deploy.sh + source hack/common.sh ++++ dirname 'hack/common.sh[0]' +++ cd hack/../ +++ pwd ++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt ++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out ++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/vendor ++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/cmd ++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/tests ++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/apidocs ++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests ++ MANIFEST_TEMPLATES_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/templates/manifests ++ 
PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/client-python ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_NUM_NODES=2 ++ '[' -z kubevirt-functional-tests-openshift-3.10-release ']' ++ provider_prefix=kubevirt-functional-tests-openshift-3.10-release0 ++ job_prefix=kubevirt-functional-tests-openshift-3.10-release0 +++ kubevirt_version +++ '[' -n '' ']' +++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/.git ']' ++++ git describe --always --tags +++ echo v0.7.0-alpha.5-38-gcfbd00f ++ KUBEVIRT_VERSION=v0.7.0-alpha.5-38-gcfbd00f + source cluster/os-3.10.0/provider.sh ++ set -e ++ image=os-3.10.0@sha256:0977f1c716862710c8324798b95802e11a149f4532e33be20dd70877fe8f5332 ++ source cluster/ephemeral-provider-common.sh +++ set -e +++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a' + source hack/config.sh ++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ KUBEVIRT_PROVIDER=os-3.10.0 ++ source hack/config-default.sh source hack/config-os-3.10.0.sh +++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test' +++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/disks-images-provider images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli images/cdi-http-import-server' +++ docker_prefix=kubevirt +++ docker_tag=latest +++ master_ip=192.168.200.2 +++ network_provider=flannel +++ namespace=kube-system ++ test -f hack/config-provider-os-3.10.0.sh ++ source hack/config-provider-os-3.10.0.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/cluster/os-3.10.0/.kubectl +++ docker_prefix=localhost:32975/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Deploying ...' Deploying ... 
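The deployment trace that follows iterates over the generated release manifests (skipping demo content), applies the testing manifests, and then polls until the KubeVirt pods leave non-Running states; a condensed sketch of those two steps, using the same _kubectl wrapper as above, with the timeout and sample values taken from the trace (other details of the real scripts may differ):

# 1. Apply every release manifest except demo content, then the testing manifests.
for manifest in "${MANIFESTS_OUT_DIR}"/release/*; do
    [[ $manifest =~ .*demo.* ]] && continue
    _kubectl create -f "$manifest"
done
_kubectl create -f "${MANIFESTS_OUT_DIR}/testing" -R

# 2. Wait for all pods in the target namespaces to reach the Running state.
timeout=300
sample=30
for ns in kube-system default; do
    current_time=0
    while [ -n "$(cluster/kubectl.sh get pods -n "$ns" --no-headers | grep -v Running)" ]; do
        echo 'Waiting for kubevirt pods to enter the Running state ...'
        sleep "$sample"
        current_time=$((current_time + sample))
        if [ "$current_time" -gt "$timeout" ]; then
            echo "timed out waiting for pods in $ns"
            exit 1
        fi
    done
done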
+ [[ -z openshift-3.10-release ]] + [[ openshift-3.10-release =~ .*-dev ]] + [[ openshift-3.10-release =~ .*-release ]] + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]] + continue + for manifest in '${MANIFESTS_OUT_DIR}/release/*' + [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]] + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml clusterrole.rbac.authorization.k8s.io "kubevirt.io:admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:edit" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:view" created serviceaccount "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-apiserver-auth-delegator" created rolebinding.rbac.authorization.k8s.io "kubevirt-apiserver" created role.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-apiserver" created clusterrole.rbac.authorization.k8s.io "kubevirt-controller" created serviceaccount "kubevirt-controller" created serviceaccount "kubevirt-privileged" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-controller-cluster-admin" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-privileged-cluster-admin" created clusterrole.rbac.authorization.k8s.io "kubevirt.io:default" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt.io:default" created service "virt-api" created deployment.extensions "virt-api" created deployment.extensions "virt-controller" created daemonset.extensions "virt-handler" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstances.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancereplicasets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachineinstancepresets.kubevirt.io" created customresourcedefinition.apiextensions.k8s.io "virtualmachines.kubevirt.io" created + _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R persistentvolumeclaim "disk-alpine" created persistentvolume "host-path-disk-alpine" created persistentvolumeclaim "disk-custom" created persistentvolume "host-path-disk-custom" created persistentvolume "host-path-disk-datavolume1" created persistentvolume "host-path-disk-datavolume2" created persistentvolume "host-path-disk-datavolume3" created service "cdi-http-import-server" created deployment.extensions "cdi-http-import-server" created daemonset.extensions 
"disks-images-provider" created serviceaccount "kubevirt-testing" created clusterrolebinding.rbac.authorization.k8s.io "kubevirt-testing-cluster-admin" created + [[ os-3.10.0 =~ os-* ]] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"] + _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"] + _kubectl adm policy add-scc-to-user privileged admin + export KUBECONFIG=cluster/os-3.10.0/.kubeconfig + KUBECONFIG=cluster/os-3.10.0/.kubeconfig + cluster/os-3.10.0/.kubectl adm policy add-scc-to-user privileged admin scc "privileged" added to: ["admin"] + echo Done Done + namespaces=(kube-system default) + [[ kube-system != \k\u\b\e\-\s\y\s\t\e\m ]] + timeout=300 + sample=30 + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n kube-system --no-headers ++ grep -v Running ++ cluster/kubectl.sh get pods -n kube-system --no-headers + '[' -n 'cdi-http-import-server-7dbffbcb77-9hr2v 0/1 ContainerCreating 0 6s disks-images-provider-rlqwd 0/1 ContainerCreating 0 5s disks-images-provider-vvj2z 0/1 ContainerCreating 0 5s virt-api-7d79764579-2x2bc 0/1 ContainerCreating 0 8s virt-api-7d79764579-87swc 0/1 ContainerCreating 0 8s virt-handler-8b9gh 0/1 ContainerCreating 0 8s virt-handler-cdt5l 0/1 ContainerCreating 0 8s' ']' + echo 'Waiting for kubevirt pods to enter the Running state ...' Waiting for kubevirt pods to enter the Running state ... 
+ kubectl get pods -n kube-system --no-headers + cluster/kubectl.sh get pods -n kube-system --no-headers + grep -v Running cdi-http-import-server-7dbffbcb77-9hr2v 0/1 ContainerCreating 0 7s disks-images-provider-rlqwd 0/1 ContainerCreating 0 6s disks-images-provider-vvj2z 0/1 ContainerCreating 0 6s virt-api-7d79764579-2x2bc 0/1 ContainerCreating 0 8s virt-api-7d79764579-87swc 0/1 ContainerCreating 0 8s virt-handler-8b9gh 0/1 ContainerCreating 0 8s virt-handler-cdt5l 0/1 ContainerCreating 0 8s + sleep 30 + current_time=30 + '[' 30 -gt 300 ']' ++ kubectl get pods -n kube-system --no-headers ++ cluster/kubectl.sh get pods -n kube-system --no-headers ++ grep -v Running + '[' -n '' ']' + current_time=0 ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false + '[' -n false ']' + echo 'Waiting for KubeVirt containers to become ready ...' Waiting for KubeVirt containers to become ready ... + kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + grep false false + sleep 30 + current_time=30 + '[' 30 -gt 300 ']' ++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false ++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + '[' -n '' ']' + kubectl get pods -n kube-system + cluster/kubectl.sh get pods -n kube-system NAME READY STATUS RESTARTS AGE cdi-http-import-server-7dbffbcb77-9hr2v 1/1 Running 0 1m disks-images-provider-rlqwd 1/1 Running 0 1m disks-images-provider-vvj2z 1/1 Running 0 1m master-api-node01 1/1 Running 1 11d master-controllers-node01 1/1 Running 1 11d master-etcd-node01 1/1 Running 1 11d virt-api-7d79764579-2x2bc 1/1 Running 0 1m virt-api-7d79764579-87swc 1/1 Running 0 1m virt-controller-7d57d96b65-g4n8m 1/1 Running 0 1m virt-controller-7d57d96b65-z5pz4 1/1 Running 0 1m virt-handler-8b9gh 1/1 Running 0 1m virt-handler-cdt5l 1/1 Running 0 1m + for i in '${namespaces[@]}' + current_time=0 ++ kubectl get pods -n default --no-headers ++ cluster/kubectl.sh get pods -n default --no-headers ++ grep -v Running + '[' -n '' ']' + current_time=0 ++ kubectl get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers ++ grep false ++ cluster/kubectl.sh get pods -n default '-ocustom-columns=status:status.containerStatuses[*].ready' --no-headers + '[' -n '' ']' + kubectl get pods -n default + cluster/kubectl.sh get pods -n default NAME READY STATUS RESTARTS AGE docker-registry-1-tqlsm 1/1 Running 1 11d registry-console-1-bhtqz 1/1 Running 1 11d router-1-r2xxq 1/1 Running 1 11d + kubectl version + cluster/kubectl.sh version oc v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 features: Basic-Auth GSSAPI Kerberos SPNEGO Server https://127.0.0.1:32972 openshift v3.10.0-rc.0+c20e215 kubernetes v1.10.0+b81c8f8 + ginko_params='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + [[ -d /home/nfs/images/windows2016 ]] + [[ openshift-3.10-release =~ windows.* ]] + FUNC_TEST_ARGS='--ginkgo.noColor --junit-output=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.10-release/junit.xml' + make functest hack/dockerized 
"hack/build-func-tests.sh" sha256:0e52976342d8bb3bff850b704549f06d30e55824f50309732b55e928864b05c5 go version go1.10 linux/amd64 go version go1.10 linux/amd64 Compiling tests... compiled tests.test hack/functests.sh Running Suite: Tests Suite ========================== Random Seed: 1530587855 Will run 132 of 132 specs • [SLOW TEST:5.590 seconds] Subresource Api /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37 Rbac Authorization /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:48 with correct permissions /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:51 should be allowed to access subresource endpoint /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:52 ------------------------------ • [SLOW TEST:6.164 seconds] Subresource Api /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:37 Rbac Authorization /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:48 Without permissions /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:56 should not be able to access subresource endpoint /root/go/src/kubevirt.io/kubevirt/tests/subresource_api_test.go:57 ------------------------------ •••• ------------------------------ • Failure [300.443 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should update VirtualMachine once VMIs are up [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:132 Timed out after 300.000s. Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:138 ------------------------------ • ------------------------------ • Failure [300.093 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should remove owner references on the VirtualMachineInstance if it is orphan deleted [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:154 Timed out after 300.000s. Expected <[]v1.OwnerReference | len:0, cap:0>: nil not to be empty /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:161 ------------------------------ STEP: Starting the VirtualMachineInstance • Failure [300.353 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if it gets deleted [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:182 Timed out after 300.000s. 
Expected success, but got an error: <*errors.StatusError | 0xc42062a120>: { ErrStatus: { TypeMeta: {Kind: "", APIVersion: ""}, ListMeta: {SelfLink: "", ResourceVersion: "", Continue: ""}, Status: "Failure", Message: "virtualmachineinstances.kubevirt.io \"testvmijf5bw\" not found", Reason: "NotFound", Details: { Name: "testvmijf5bw", Group: "kubevirt.io", Kind: "virtualmachineinstances", UID: "", Causes: nil, RetryAfterSeconds: 0, }, Code: 404, }, } virtualmachineinstances.kubevirt.io "testvmijf5bw" not found /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1713 ------------------------------ STEP: Creating a new VMI STEP: Waiting for the VMI's VirtualMachineInstance to start • Failure [120.316 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:202 Timed out after 120.000s. Expected success, but got an error: <*errors.StatusError | 0xc420a374d0>: { ErrStatus: { TypeMeta: {Kind: "", APIVersion: ""}, ListMeta: {SelfLink: "", ResourceVersion: "", Continue: ""}, Status: "Failure", Message: "virtualmachineinstances.kubevirt.io \"testvmifs4xk\" not found", Reason: "NotFound", Details: { Name: "testvmifs4xk", Group: "kubevirt.io", Kind: "virtualmachineinstances", UID: "", Causes: nil, RetryAfterSeconds: 0, }, Code: 404, }, } virtualmachineinstances.kubevirt.io "testvmifs4xk" not found /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:221 ------------------------------ STEP: Starting the VirtualMachineInstance • Failure [300.313 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should stop VirtualMachineInstance if running set to false [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:262 Timed out after 300.001s. Expected success, but got an error: <*errors.StatusError | 0xc420866360>: { ErrStatus: { TypeMeta: {Kind: "", APIVersion: ""}, ListMeta: {SelfLink: "", ResourceVersion: "", Continue: ""}, Status: "Failure", Message: "virtualmachineinstances.kubevirt.io \"testvmiqvpnt\" not found", Reason: "NotFound", Details: { Name: "testvmiqvpnt", Group: "kubevirt.io", Kind: "virtualmachineinstances", UID: "", Causes: nil, RetryAfterSeconds: 0, }, Code: 404, }, } virtualmachineinstances.kubevirt.io "testvmiqvpnt" not found /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1713 ------------------------------ STEP: Doing run: 0 STEP: Starting the VirtualMachineInstance • Failure [300.397 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should start and stop VirtualMachineInstance multiple times [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:270 Timed out after 300.001s. 
Expected success, but got an error: <*errors.StatusError | 0xc420a36bd0>: { ErrStatus: { TypeMeta: {Kind: "", APIVersion: ""}, ListMeta: {SelfLink: "", ResourceVersion: "", Continue: ""}, Status: "Failure", Message: "virtualmachineinstances.kubevirt.io \"testvmij6572\" not found", Reason: "NotFound", Details: { Name: "testvmij6572", Group: "kubevirt.io", Kind: "virtualmachineinstances", UID: "", Causes: nil, RetryAfterSeconds: 0, }, Code: 404, }, } virtualmachineinstances.kubevirt.io "testvmij6572" not found /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1713 ------------------------------ • Failure [360.172 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should not update the VirtualMachineInstance spec if Running [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:283 Timed out after 360.000s. Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:290 ------------------------------ STEP: Creating new VMI, not running STEP: Starting the VirtualMachineInstance • Failure [300.357 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 should survive guest shutdown, multiple times [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:324 Timed out after 300.001s. Expected success, but got an error: <*errors.StatusError | 0xc4207be5a0>: { ErrStatus: { TypeMeta: {Kind: "", APIVersion: ""}, ListMeta: {SelfLink: "", ResourceVersion: "", Continue: ""}, Status: "Failure", Message: "virtualmachineinstances.kubevirt.io \"testvmi59sgt\" not found", Reason: "NotFound", Details: { Name: "testvmi59sgt", Group: "kubevirt.io", Kind: "virtualmachineinstances", UID: "", Causes: nil, RetryAfterSeconds: 0, }, Code: 404, }, } virtualmachineinstances.kubevirt.io "testvmi59sgt" not found /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1713 ------------------------------ STEP: getting an VMI STEP: Invoking virtctl start STEP: Getting the status of the VMI • Failure [360.172 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:372 should start a VirtualMachineInstance once [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:373 Timed out after 360.000s. Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:390 ------------------------------ STEP: getting an VMI STEP: Invoking virtctl stop STEP: Ensuring VMI is running • Failure [360.421 seconds] VirtualMachine /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:47 A valid VirtualMachine given /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:115 Using virtctl interface /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:372 should stop a VirtualMachineInstance once [It] /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:404 Timed out after 360.000s. Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vm_test.go:417 ------------------------------ ••STEP: Generating VM JSON from the Template via oc-process command STEP: Creating VM via oc-create command STEP: Checking if the VM exists via oc-get command. 
STEP: Launching VMI via oc-patch command STEP: Checking if the VMI does exist via oc-get command STEP: Deleting the VM via oc-delete command STEP: Checking if the VM does not exist anymore via oc-get command. ------------------------------ • Failure [64.467 seconds] Templates /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:42 Launching VMI from VM Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:60 with given Fedora Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:193 with given VM JSON from the Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:152 with given VM from the VM JSON /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:158 should succeed to launch a VMI using oc-patch command [It] /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:161 Timed out after 60.280s. Timed out waiting for VMI to appear Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:118 ------------------------------ STEP: Generating VM JSON from the Template via oc-process command STEP: Creating VM via oc-create command STEP: Checking if the VM exists via oc-get command. STEP: Launching VMI via oc-patch command STEP: Checking if the VMI does exist via oc-get command STEP: Deleting the VM via oc-delete command STEP: Checking if the VM does not exist anymore via oc-get command. • Failure in Spec Setup (JustBeforeEach) [64.684 seconds] Templates /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:42 Launching VMI from VM Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:60 with given Fedora Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:193 with given VM JSON from the Template /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:152 with given VM from the VM JSON /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:158 with given VMI from the VM /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:163 should succeed to terminate the VMI using oc-patch command [JustBeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:166 Timed out after 60.298s. Timed out waiting for VMI to appear Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/template_test.go:118 ------------------------------ •••••••••••STEP: Starting a VirtualMachineInstance ------------------------------ • Failure [180.267 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:43 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:54 with 3 CPU cores /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:55 should report 3 cpu cores under guest OS [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:61 Timed out after 90.015s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ S [SKIPPING] [0.349 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:43 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:54 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:107 should consume hugepages /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 hugepages-2Mi [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 No node with hugepages hugepages-2Mi capacity /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:159 ------------------------------ S [SKIPPING] [0.201 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:43 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:54 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:107 should consume hugepages /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 hugepages-1Gi [It] /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 No node with hugepages hugepages-1Gi capacity /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:159 ------------------------------ STEP: Starting a VM • Failure [30.257 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:43 VirtualMachineInstance definition /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:54 with hugepages /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:107 with usupported page size /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:193 should failed to schedule the pod [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:194 Timed out after 30.000s. Expected : false to be true /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:229 ------------------------------ • Failure [180.376 seconds] Configurations /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:43 New VirtualMachineInstance with all supported drives /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:237 should have all the device nodes [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_configuration_test.go:260 Timed out after 90.036s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ • [SLOW TEST:20.098 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:20.291 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given an vm /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:20.385 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi preset /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ • [SLOW TEST:20.474 seconds] User Access /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:33 With default kubevirt service accounts /root/go/src/kubevirt.io/kubevirt/tests/access_test.go:41 should verify permissions are correct for view, edit, and admin /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table.go:92 given a vmi replica set /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go:46 ------------------------------ STEP: Starting a VirtualMachineInstance • Failure [180.392 seconds] VNC /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:37 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:48 with VNC connection /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:49 should allow accessing the VNC device [It] /root/go/src/kubevirt.io/kubevirt/tests/vnc_test.go:50 Timed out after 90.011s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ STEP: Starting the VirtualMachineInstance • Failure [360.280 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting and stopping the same VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:90 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:91 should success multiple times [It] /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:92 Timed out after 180.008s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ STEP: Starting the VirtualMachineInstance • Failure [120.273 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:111 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:112 should not modify the spec on status update [It] /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:113 Timed out after 60.007s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance STEP: Starting a VirtualMachineInstance • Failure [60.485 seconds] RegistryDisk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:41 Starting multiple VMIs /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:129 with ephemeral registry disk /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:130 should success [It] /root/go/src/kubevirt.io/kubevirt/tests/registry_disk_test.go:131 Timed out after 30.011s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [1.064 seconds] DataVolume Integration /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:36 Starting a VirtualMachineInstance with a DataVolume [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:74 using Alpine import /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:75 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:76 Skip DataVolume tests when CDI is not present /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:45 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.201 seconds] DataVolume Integration /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:36 Starting a VirtualMachine with a DataVolume [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:101 using Alpine import /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:102 should be successfully started and stopped multiple times /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:103 Skip DataVolume tests when CDI is not present /root/go/src/kubevirt.io/kubevirt/tests/datavolume_test.go:45 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.010 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to start a vmi [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:133 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1468 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.009 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 should succeed to stop a running vmi [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:139 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1468 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.009 seconds] Windows VirtualMachineInstance 
/root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150 should have correct UUID /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:192 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1468 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.010 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with winrm connection [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:150 should have pod IP /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:208 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1468 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.010 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226 should succeed to start a vmi /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:242 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1468 ------------------------------ S [SKIPPING] in Spec Setup (BeforeEach) [0.008 seconds] Windows VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:57 with kubectl command [BeforeEach] /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:226 should succeed to stop a vmi /root/go/src/kubevirt.io/kubevirt/tests/windows_test.go:250 Skip Windows tests that requires PVC disk-windows /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1468 ------------------------------ STEP: Creating a new VirtualMachineInstance • Failure [180.278 seconds] Console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64 with a serial console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65 with a cirros image /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:66 should return that we are running cirros [It] /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:67 Timed out after 90.010s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ STEP: Creating a new VirtualMachineInstance • Failure [180.381 seconds] Console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64 with a serial console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65 with a fedora image /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:76 should return that we are running fedora [It] /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:77 Timed out after 90.009s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ STEP: Creating a new VirtualMachineInstance • Failure [180.366 seconds] Console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:35 A new VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:64 with a serial console /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:65 should be able to reconnect to console multiple times [It] /root/go/src/kubevirt.io/kubevirt/tests/console_test.go:86 Timed out after 90.009s. 
Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ •STEP: Destroying the leading controller pod STEP: Starting a new VirtualMachineInstance ------------------------------ • Failure [200.441 seconds] LeaderElection /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:43 Start a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:53 when the controller pod is not running /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:54 should success [It] /root/go/src/kubevirt.io/kubevirt/tests/controller_leader_election_test.go:55 Timed out after 90.013s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ • ------------------------------ • Failure [180.313 seconds] VMIlifecycle /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:50 Creating a VirtualMachineInstance /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:72 should start it [It] /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:78 Timed out after 90.010s. Expected : false to equal : true /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 ------------------------------ panic: test timed out after 1h30m0s goroutine 10148 [running]: testing.(*M).startAlarm.func1() /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1240 +0xfc created by time.goFunc /gimme/.gimme/versions/go1.10.linux.amd64/src/time/sleep.go:172 +0x44 goroutine 1 [chan receive, 90 minutes]: testing.(*T).Run(0xc4200280f0, 0x1260425, 0x9, 0x12eb230, 0x47fa16) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:825 +0x301 testing.runTests.func1(0xc420028000) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1063 +0x64 testing.tRunner(0xc420028000, 0xc420683df8) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0 testing.runTests(0xc4206b9b80, 0x1b37e10, 0x1, 0x1, 0x412009) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:1061 +0x2c4 testing.(*M).Run(0xc42080e000, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:978 +0x171 main.main() _testmain.go:44 +0x151 goroutine 35 [chan receive]: kubevirt.io/kubevirt/vendor/github.com/golang/glog.(*loggingT).flushDaemon(0x1b5eba0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:879 +0x8b created by kubevirt.io/kubevirt/vendor/github.com/golang/glog.init.0 /root/go/src/kubevirt.io/kubevirt/vendor/github.com/golang/glog/glog.go:410 +0x203 goroutine 36 [syscall, 90 minutes]: os/signal.signal_recv(0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/sigqueue.go:139 +0xa6 os/signal.loop() /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:22 +0x22 created by os/signal.init.0 /gimme/.gimme/versions/go1.10.linux.amd64/src/os/signal/signal_unix.go:28 +0x41 goroutine 7 [sleep]: time.Sleep(0xad77223) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/time.go:102 +0x166 kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol.realClock.Sleep(0xad77223) /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol/throttle.go:66 +0x2b kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol.(*tokenBucketRateLimiter).Accept(0xc4204e75e0) /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/util/flowcontrol/throttle.go:91 +0xbd kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest.(*Request).tryThrottle(0xc42087b680) 
/root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest/request.go:478 +0x1fd kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest.(*Request).Do(0xc42087b680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) /root/go/src/kubevirt.io/kubevirt/vendor/k8s.io/client-go/rest/request.go:733 +0x62 kubevirt.io/kubevirt/pkg/kubecli.(*vmis).Get(0xc420115680, 0xc420cee3d0, 0xc, 0xc420eaef40, 0xc420115680, 0x8, 0x7f974ece5000) /root/go/src/kubevirt.io/kubevirt/pkg/kubecli/vmi.go:317 +0x125 kubevirt.io/kubevirt/tests.waitForVMIStart.func1(0x0) /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1157 +0xc2 reflect.Value.call(0x10867e0, 0xc420afb050, 0x13, 0x125a8d4, 0x4, 0xc4209aaf18, 0x0, 0x0, 0x10867e0, 0x10867e0, ...) /gimme/.gimme/versions/go1.10.linux.amd64/src/reflect/value.go:447 +0x969 reflect.Value.Call(0x10867e0, 0xc420afb050, 0x13, 0xc4209aaf18, 0x0, 0x0, 0x44b21b, 0xc420bef728, 0xc4209aaf50) /gimme/.gimme/versions/go1.10.linux.amd64/src/reflect/value.go:308 +0xa4 kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).pollActual(0xc420c37980, 0x0, 0x0, 0x0, 0x0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:71 +0x9f kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).match(0xc420c37980, 0x1371b20, 0xc42030e9c0, 0x412801, 0x0, 0x0, 0x0, 0xc42030e9c0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:141 +0x305 kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion.(*AsyncAssertion).Should(0xc420c37980, 0x1371b20, 0xc42030e9c0, 0x0, 0x0, 0x0, 0xc420c37980) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go:48 +0x62 kubevirt.io/kubevirt/tests.waitForVMIStart(0x1368900, 0xc4202d5680, 0x5a, 0x0, 0x0, 0x1b7d201) /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1167 +0x668 kubevirt.io/kubevirt/tests.WaitForSuccessfulVMIStart(0x1368900, 0xc4202d5680, 0x1b7d200, 0x0) /root/go/src/kubevirt.io/kubevirt/tests/utils.go:1190 +0x43 kubevirt.io/kubevirt/tests_test.glob..func16.3.3() /root/go/src/kubevirt.io/kubevirt/tests/vmi_lifecycle_test.go:87 +0x142 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).runSync(0xc420382660, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:109 +0x9c kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*runner).run(0xc420382660, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go:63 +0x13e kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes.(*ItNode).Run(0xc4206b9a80, 0x1365360, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ...) 
/root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go:25 +0x7f kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).runSample(0xc42051a270, 0x0, 0x1365360, 0xc420213080) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:176 +0x5a6 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec.(*Spec).Run(0xc42051a270, 0x1365360, 0xc420213080) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/spec/spec.go:127 +0xe3 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpec(0xc4208b77c0, 0xc42051a270, 0x0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:198 +0x10d kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpecs(0xc4208b77c0, 0x12ebf01) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:168 +0x32c kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run(0xc4208b77c0, 0xb) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:64 +0xdc kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite.(*Suite).Run(0xc4200fe9b0, 0x7f974ebbe148, 0xc4200280f0, 0x126281d, 0xb, 0xc4206b9c80, 0x2, 0x2, 0x137fb20, 0xc420213080, ...) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/suite/suite.go:62 +0x27c kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithCustomReporters(0x13660a0, 0xc4200280f0, 0x126281d, 0xb, 0xc4206b9c20, 0x2, 0x2, 0x1) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:218 +0x253 kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo.RunSpecsWithDefaultAndCustomReporters(0x13660a0, 0xc4200280f0, 0x126281d, 0xb, 0xc4205a8580, 0x1, 0x1, 0x1) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:206 +0x129 kubevirt.io/kubevirt/tests_test.TestTests(0xc4200280f0) /root/go/src/kubevirt.io/kubevirt/tests/tests_suite_test.go:42 +0xaa testing.tRunner(0xc4200280f0, 0x12eb230) /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:777 +0xd0 created by testing.(*T).Run /gimme/.gimme/versions/go1.10.linux.amd64/src/testing/testing.go:824 +0x2e0 goroutine 8 [chan receive, 90 minutes]: kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).registerForInterrupts(0xc4208b77c0) /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:220 +0xc0 created by kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run /root/go/src/kubevirt.io/kubevirt/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go:59 +0x60 goroutine 66 [select, 90 minutes, locked to thread]: runtime.gopark(0x12ed010, 0x0, 0x125d19f, 0x6, 0x18, 0x1) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/proc.go:291 +0x11a runtime.selectgo(0xc420095750, 0xc4200480c0) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/select.go:392 +0xe50 runtime.ensureSigM.func1() /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/signal_unix.go:549 +0x1f4 runtime.goexit() /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/asm_amd64.s:2361 +0x1 goroutine 13 [IO wait]: internal/poll.runtime_pollWait(0x7f974ec8dff8, 0x72, 0xc420d2f850) /gimme/.gimme/versions/go1.10.linux.amd64/src/runtime/netpoll.go:173 +0x57 internal/poll.(*pollDesc).wait(0xc4205b4e98, 0x72, 
0xffffffffffffff00, 0x1367040, 0x1a4f638) /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:85 +0x9b internal/poll.(*pollDesc).waitRead(0xc4205b4e98, 0xc42065e000, 0x4000, 0x4000) /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_poll_runtime.go:90 +0x3d internal/poll.(*FD).Read(0xc4205b4e80, 0xc42065e000, 0x4000, 0x4000, 0x0, 0x0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/internal/poll/fd_unix.go:157 +0x17d net.(*netFD).Read(0xc4205b4e80, 0xc42065e000, 0x4000, 0x4000, 0x0, 0x8, 0x3ffb) /gimme/.gimme/versions/go1.10.linux.amd64/src/net/fd_unix.go:202 +0x4f net.(*conn).Read(0xc4201725d8, 0xc42065e000, 0x4000, 0x4000, 0x0, 0x0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/net/net.go:176 +0x6a crypto/tls.(*block).readFromUntil(0xc4205b1fb0, 0x7f974ebbe380, 0xc4201725d8, 0x5, 0xc4201725d8, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:493 +0x96 crypto/tls.(*Conn).readRecord(0xc4202e6700, 0x12ed117, 0xc4202e6820, 0x20) /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:595 +0xe0 crypto/tls.(*Conn).Read(0xc4202e6700, 0xc4204b8000, 0x1000, 0x1000, 0x0, 0x0, 0x0) /gimme/.gimme/versions/go1.10.linux.amd64/src/crypto/tls/conn.go:1156 +0x100 bufio.(*Reader).Read(0xc420664f60, 0xc420742578, 0x9, 0x9, 0xc4206f1fd8, 0xc420847c00, 0xc420d2fd10) /gimme/.gimme/versions/go1.10.linux.amd64/src/bufio/bufio.go:216 +0x238 io.ReadAtLeast(0x1364020, 0xc420664f60, 0xc420742578, 0x9, 0x9, 0x9, 0xc420d2fce0, 0xc420d2fce0, 0x406614) /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:309 +0x86 io.ReadFull(0x1364020, 0xc420664f60, 0xc420742578, 0x9, 0x9, 0xc4206f1f80, 0xc420d2fd10, 0xc400004801) /gimme/.gimme/versions/go1.10.linux.amd64/src/io/io.go:327 +0x58 kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.readFrameHeader(0xc420742578, 0x9, 0x9, 0x1364020, 0xc420664f60, 0x0, 0xc400000000, 0x7ba9bd, 0xc420d2ffb0) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:237 +0x7b kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Framer).ReadFrame(0xc420742540, 0xc420b63f50, 0x0, 0x0, 0x0) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/frame.go:492 +0xa4 kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*clientConnReadLoop).run(0xc420d2ffb0, 0x12ec148, 0xc4204897b0) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1428 +0x8e kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*ClientConn).readLoop(0xc4202ed040) /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:1354 +0x76 created by kubevirt.io/kubevirt/vendor/golang.org/x/net/http2.(*Transport).newClientConn /root/go/src/kubevirt.io/kubevirt/vendor/golang.org/x/net/http2/transport.go:579 +0x651 make: *** [functest] Error 2 + make cluster-down ./cluster/down.sh
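[Editor's note] The run ends when the Go test binary hits its 90-minute deadline while waitForVMIStart (tests/utils.go:1167) is still polling a VMI through the client-go REST client, which is what the goroutine dump above shows; the panic aborts the suite, make functest returns Error 2, and the EXIT trap installed at the start of the job runs make cluster-down as the last step of the log. Every hard failure in this run is the same symptom, a VMI that never reaches the Running phase, so a triage pass would start at the VMIs and their launcher pods rather than at the individual specs. A hypothetical set of checks against the same cluster (the "vmis" short name and the kubevirt.io pod labels are assumptions about this KubeVirt version, not taken from the log):

export KUBECONFIG=cluster/os-3.10.0/.kubeconfig
# Which VMIs exist and in what phase they are stuck:
cluster/os-3.10.0/.kubectl -n kube-system get vmis -o wide
# Whether virt-launcher pods were ever scheduled for them:
cluster/os-3.10.0/.kubectl -n kube-system get pods -l kubevirt.io=virt-launcher
# Recent virt-handler output and cluster events for scheduling or image-pull problems:
cluster/os-3.10.0/.kubectl -n kube-system logs -l kubevirt.io=virt-handler --tail=50
cluster/os-3.10.0/.kubectl -n kube-system get events --sort-by=.lastTimestamp | tail -n 20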