Commit b6a429dc authored by Sharif Elgamal

hey did you want more files? good

parent f2ebbb9c
@@ -19,12 +19,10 @@ package cmd
import (
"github.com/spf13/cobra"
cmdConfig "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/image"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/node"
)
// cacheImageConfigKey is the config field name used to store which images we have previously cached
@@ -77,54 +75,13 @@ var reloadCacheCmd = &cobra.Command{
Short: "reload cached images.",
Long: "reloads images previously added using the 'cache add' subcommand",
Run: func(cmd *cobra.Command, args []string) {
err := cacheAndLoadImagesInConfig()
err := node.CacheAndLoadImagesInConfig()
if err != nil {
exit.WithError("Failed to reload cached images", err)
}
},
}
func imagesInConfigFile() ([]string, error) {
configFile, err := config.ReadConfig(localpath.ConfigFile)
if err != nil {
return nil, err
}
if values, ok := configFile[cacheImageConfigKey]; ok {
var images []string
for key := range values.(map[string]interface{}) {
images = append(images, key)
}
return images, nil
}
return []string{}, nil
}
// saveImagesToTarFromConfig saves images to tar in cache which specified in config file.
// currently only used by download-only option
func saveImagesToTarFromConfig() error {
images, err := imagesInConfigFile()
if err != nil {
return err
}
if len(images) == 0 {
return nil
}
return image.SaveToDir(images, constants.ImageCacheDir)
}
// cacheAndLoadImagesInConfig loads the images currently in the config file
// called by 'start' and 'cache reload' commands.
func cacheAndLoadImagesInConfig() error {
images, err := imagesInConfigFile()
if err != nil {
return err
}
if len(images) == 0 {
return nil
}
return machine.CacheAndLoadImages(images)
}
func init() {
cacheCmd.AddCommand(addCacheCmd)
cacheCmd.AddCommand(deleteCacheCmd)
......
@@ -85,7 +85,7 @@ var printProfilesTable = func() {
glog.Errorf("%q has no control plane: %v", p.Name, err)
// Print the data we know about anyways
}
validData = append(validData, []string{p.Name, p.Config.VMDriver, p.Config.KubernetesConfig.ContainerRuntime, cp.IP, strconv.Itoa(cp.Port), p.Config.KubernetesConfig.KubernetesVersion, p.Status})
validData = append(validData, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, cp.IP, strconv.Itoa(cp.Port), p.Config.KubernetesConfig.KubernetesVersion, p.Status})
}
table.AppendBulk(validData)
......
@@ -192,7 +192,7 @@ func deleteProfile(profile *pkg_config.Profile) error {
return DeletionError{Err: delErr, Errtype: MissingProfile}
}
if err == nil && driver.BareMetal(cc.VMDriver) {
if err == nil && driver.BareMetal(cc.Driver) {
if err := uninstallKubernetes(api, profile.Name, cc.KubernetesConfig, viper.GetString(cmdcfg.Bootstrapper)); err != nil {
deletionError, ok := err.(DeletionError)
if ok {
......
@@ -23,9 +23,10 @@ import (
// nodeCmd represents the set of node subcommands
var nodeCmd = &cobra.Command{
Use: "node",
Short: "Node operations",
Long: "Operations on nodes",
Use: "node",
Short: "Node operations",
Long: "Operations on nodes",
Hidden: true, // This won't be fully functional and thus should not be documented yet
Run: func(cmd *cobra.Command, args []string) {
exit.UsageT("Usage: minikube node [add|start|stop|delete]")
},
......
@@ -45,12 +45,15 @@ var nodeAddCmd = &cobra.Command{
out.T(out.SuccessType, "Adding node {{.name}} to cluster {{.profile}}", out.V{"name": name, "profile": profile})
cp := viper.GetBool("control-plane")
worker := viper.GetBool("worker")
err = node.Add(mc, name, cp, worker, "", profile)
n, err := node.Add(mc, name, cp, worker, "", profile)
if err != nil {
exit.WithError("Error adding node to cluster", err)
}
node.Start(mc, name)
_, err = node.Start(mc, n, false)
if err != nil {
exit.WithError("Error starting node", err)
}
},
}
......
@@ -47,8 +47,13 @@ var nodeDeleteCmd = &cobra.Command{
exit.WithError("loading config", err)
}
n, _, err := node.Retrieve(cc, name)
if err != nil {
exit.WithError("retrieving node", err)
}
if cluster.IsHostRunning(api, name) {
node.Stop(cc, name)
node.Stop(cc, n)
}
node.Delete(cc, name)
......
@@ -54,8 +54,14 @@ var nodeStartCmd = &cobra.Command{
if err != nil {
exit.WithError("loading config", err)
}
n, _, err := node.Retrieve(cc, name)
if err != nil {
exit.WithError("retrieving node", err)
}
// Start it up baby
node.Start(cc, name)
node.Start(cc, n, false)
},
}
......
@@ -54,7 +54,12 @@ var nodeStopCmd = &cobra.Command{
exit.WithError("loading config", err)
}
node.Stop(cc, name)
n, _, err := node.Retrieve(cc, name)
if err != nil {
exit.WithError("retrieving node", err)
}
node.Stop(cc, n)
},
}
......
@@ -44,7 +44,6 @@ import (
gopshost "github.com/shirou/gopsutil/host"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"golang.org/x/sync/errgroup"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
@@ -131,8 +130,6 @@ const (
var (
registryMirror []string
dockerEnv []string
dockerOpt []string
insecureRegistry []string
apiServerNames []string
addonList []string
@@ -239,8 +236,8 @@ func initNetworkingFlags() {
startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers")
startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn.")
startCmd.Flags().String(serviceCIDR, constants.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
startCmd.Flags().StringArrayVar(&dockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
startCmd.Flags().StringArrayVar(&dockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
startCmd.Flags().StringArrayVar(&node.DockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
startCmd.Flags().StringArrayVar(&node.DockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
}
// startCmd represents the start command
@@ -341,7 +338,10 @@ func runStart(cmd *cobra.Command, args []string) {
ssh.SetDefaultClient(ssh.External)
}
node.Start(&mc, &n, true, isUpgrade)
kubeconfig, err := node.Start(&mc, &n, true)
if err != nil {
exit.WithError("Starting node", err)
}
if err := showKubectlInfo(kubeconfig, k8sVersion, mc.Name); err != nil {
glog.Errorf("kubectl info: %v", err)
@@ -421,17 +421,6 @@ func setupKubeconfig(h *host.Host, c *config.MachineConfig, n *config.Node, clus
return kcs, nil
}
func showVersionInfo(k8sVersion string, cr cruntime.Manager) {
version, _ := cr.Version()
out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version})
for _, v := range dockerOpt {
out.T(out.Option, "opt {{.docker_option}}", out.V{"docker_option": v})
}
for _, v := range dockerEnv {
out.T(out.Option, "env {{.docker_env}}", out.V{"docker_env": v})
}
}
func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
if kcs.KeepContext {
out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName})
@@ -487,8 +476,8 @@ func selectDriver(existing *config.MachineConfig) registry.DriverState {
}
// By default, the driver is whatever we used last time
if existing != nil && existing.VMDriver != "" {
ds := driver.Status(existing.VMDriver)
if existing != nil && existing.Driver != "" {
ds := driver.Status(existing.Driver)
out.T(out.Sparkle, `Using the {{.driver}} driver based on existing profile`, out.V{"driver": ds.String()})
return ds
}
@@ -532,7 +521,7 @@ func validateDriver(ds registry.DriverState, existing *config.MachineConfig) {
out.ErrLn("")
if !st.Installed && !viper.GetBool(force) {
if existing != nil && name == existing.VMDriver {
if existing != nil && name == existing.Driver {
exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed, but is specified by an existing profile. Please run 'minikube delete' or install {{.driver}}", out.V{"driver": name})
}
exit.WithCodeT(exit.Unavailable, "{{.driver}} does not appear to be installed", out.V{"driver": name})
@@ -761,21 +750,6 @@ func validateRegistryMirror() {
}
}
// doCacheBinaries caches Kubernetes binaries in the foreground
func doCacheBinaries(k8sVersion string) error {
return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper))
}
// waitCacheRequiredImages blocks until the required images are all cached.
func waitCacheRequiredImages(g *errgroup.Group) {
if !viper.GetBool(cacheImages) {
return
}
if err := g.Wait(); err != nil {
glog.Errorln("Error caching images: ", err)
}
}
// generateCfgFromFlags generates config.Config based on flags and supplied arguments
func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.MachineConfig, config.Node, error) {
r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)})
@@ -843,13 +817,13 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
Memory: pkgutil.CalculateSizeInMB(viper.GetString(memory)),
CPUs: viper.GetInt(cpus),
DiskSize: pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)),
VMDriver: drvName,
Driver: drvName,
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
NFSShare: viper.GetStringSlice(nfsShare),
NFSSharesRoot: viper.GetString(nfsSharesRoot),
DockerEnv: dockerEnv,
DockerOpt: dockerOpt,
DockerEnv: node.DockerEnv,
DockerOpt: node.DockerOpt,
InsecureRegistry: insecureRegistry,
RegistryMirror: registryMirror,
HostOnlyCIDR: viper.GetString(hostOnlyCIDR),
@@ -903,7 +877,7 @@ func setDockerProxy() {
continue
}
}
dockerEnv = append(dockerEnv, fmt.Sprintf("%s=%s", k, v))
node.DockerEnv = append(node.DockerEnv, fmt.Sprintf("%s=%s", k, v))
}
}
}
@@ -1053,26 +1027,6 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.MachineConfig, node config.Nod
return bs
}
// configureRuntimes does what needs to happen to get a runtime going.
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig) cruntime.Manager {
config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion}
cr, err := cruntime.New(config)
if err != nil {
exit.WithError("Failed runtime", err)
}
disableOthers := true
if driver.BareMetal(drvName) {
disableOthers = false
}
err = cr.Enable(disableOthers)
if err != nil {
exit.WithError("Failed to enable container runtime", err)
}
return cr
}
// bootstrapCluster starts Kubernetes using the chosen bootstrapper
func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner command.Runner, mc config.MachineConfig) {
out.T(out.Launch, "Launching Kubernetes ... ")
......
@@ -54,6 +54,7 @@ var (
ErrKeyNotFound = errors.New("specified key could not be found in config")
)
// ErrNotExist is the error returned when a config does not exist
type ErrNotExist struct {
s string
}
......
@@ -40,7 +40,7 @@ func (p *Profile) IsValid() bool {
if p.Config == nil {
return false
}
if p.Config.VMDriver == "" {
if p.Config.Driver == "" {
return false
}
for _, n := range p.Config.Nodes {
......
@@ -54,8 +54,8 @@ func TestListProfiles(t *testing.T) {
if val[tt.index].Name != tt.expectName {
t.Errorf("expected %s got %v", tt.expectName, val[tt.index].Name)
}
if val[tt.index].Config.VMDriver != tt.vmDriver {
t.Errorf("expected %s got %v", tt.vmDriver, val[tt.index].Config.VMDriver)
if val[tt.index].Config.Driver != tt.vmDriver {
t.Errorf("expected %s got %v", tt.vmDriver, val[tt.index].Config.Driver)
}
}
......
@@ -39,7 +39,7 @@ type MachineConfig struct {
Memory int
CPUs int
DiskSize int
VMDriver string
Driver string
HyperkitVpnKitSock string // Only used by the Hyperkit driver
HyperkitVSockPorts []string // Only used by the Hyperkit driver
DockerEnv []string // Each entry is formatted as KEY=VALUE.
......
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"os"
"github.com/golang/glog"
"github.com/spf13/viper"
"golang.org/x/sync/errgroup"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/image"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
)
// beginCacheRequiredImages caches the images required for the given Kubernetes version in the background
func beginCacheRequiredImages(g *errgroup.Group, imageRepository string, k8sVersion string) {
if !viper.GetBool("cache-images") {
return
}
g.Go(func() error {
return machine.CacheImagesForBootstrapper(imageRepository, k8sVersion, viper.GetString(cmdcfg.Bootstrapper))
})
}
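// handleDownloadOnly finishes the remaining downloads and exits when --download-only is set.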
func handleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) {
// If --download-only, complete the remaining downloads and exit.
if !viper.GetBool("download-only") {
return
}
if err := doCacheBinaries(k8sVersion); err != nil {
exit.WithError("Failed to cache binaries", err)
}
waitCacheRequiredImages(cacheGroup)
if err := saveImagesToTarFromConfig(); err != nil {
exit.WithError("Failed to cache images to tar", err)
}
out.T(out.Check, "Download complete!")
os.Exit(0)
}
// doCacheBinaries caches Kubernetes binaries in the foreground
func doCacheBinaries(k8sVersion string) error {
return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper))
}
// waitCacheRequiredImages blocks until the required images are all cached.
func waitCacheRequiredImages(g *errgroup.Group) {
if !viper.GetBool(cacheImages) {
return
}
if err := g.Wait(); err != nil {
glog.Errorln("Error caching images: ", err)
}
}
// saveImagesToTarFromConfig saves the images specified in the config file as tarballs in the cache directory.
// It is currently only used by the --download-only option.
func saveImagesToTarFromConfig() error {
images, err := imagesInConfigFile()
if err != nil {
return err
}
if len(images) == 0 {
return nil
}
return image.SaveToDir(images, constants.ImageCacheDir)
}
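// imagesInConfigFile returns the list of images stored under the cache key in the minikube config file.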
func imagesInConfigFile() ([]string, error) {
configFile, err := config.ReadConfig(localpath.ConfigFile)
if err != nil {
return nil, err
}
if values, ok := configFile[cacheImageConfigKey]; ok {
var images []string
for key := range values.(map[string]interface{}) {
images = append(images, key)
}
return images, nil
}
return []string{}, nil
}
// CacheAndLoadImagesInConfig caches and loads the images currently listed in the config file.
// It is called by the 'start' and 'cache reload' commands.
func CacheAndLoadImagesInConfig() error {
images, err := imagesInConfigFile()
if err != nil {
return err
}
if len(images) == 0 {
return nil
}
return machine.CacheAndLoadImages(images)
}
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/out"
)
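// DockerEnv and DockerOpt hold the --docker-env and --docker-opt flag values, shared between the start command and the node package.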
var (
DockerEnv []string
DockerOpt []string
)
// configureRuntimes does what needs to happen to get a runtime going.
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig) cruntime.Manager {
config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion}
cr, err := cruntime.New(config)
if err != nil {
exit.WithError("Failed runtime", err)
}
disableOthers := true
if driver.BareMetal(drvName) {
disableOthers = false
}
err = cr.Enable(disableOthers)
if err != nil {
exit.WithError("Failed to enable container runtime", err)
}
return cr
}
func showVersionInfo(k8sVersion string, cr cruntime.Manager) {
version, _ := cr.Version()
out.T(cr.Style(), "Preparing Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}} ...", out.V{"k8sVersion": k8sVersion, "runtime": cr.Name(), "runtimeVersion": version})
for _, v := range DockerOpt {
out.T(out.Option, "opt {{.docker_option}}", out.V{"docker_option": v})
}
for _, v := range DockerEnv {
out.T(out.Option, "env {{.docker_env}}", out.V{"docker_env": v})
}
}
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
"net"
"os"
"os/exec"
"strings"
"time"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/host"
"github.com/golang/glog"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/proxy"
"k8s.io/minikube/pkg/util/retry"
)
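// startMachine starts the host, stores its IP in the node config, and returns a command runner, whether the host pre-existed, the machine API client, and the host itself.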
func startMachine(cfg *config.MachineConfig, n *config.Node) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
m, err := machine.NewAPIClient()
if err != nil {
exit.WithError("Failed to get machine client", err)
}
host, preExists = startHost(m, *cfg)
runner, err = machine.CommandRunner(host)
if err != nil {
exit.WithError("Failed to get command runner", err)
}
ip := validateNetwork(host, runner)
// Bypass proxy for minikube's vm host ip
err = proxy.ExcludeIP(ip)
if err != nil {
out.ErrT(out.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip})
}
// Save IP to configuration file for subsequent use
n.IP = ip
if err := Save(cfg, n); err != nil {
exit.WithError("Failed to save config", err)
}
return runner, preExists, m, host
}
// startHost starts a new minikube host using a VM or the none driver
func startHost(api libmachine.API, mc config.MachineConfig) (*host.Host, bool) {
exists, err := api.Exists(mc.Name)
if err != nil {
exit.WithError("Failed to check if machine exists", err)
}
host, err := cluster.StartHost(api, mc)
if err != nil {
exit.WithError("Unable to start VM. Please investigate and run 'minikube delete' if possible", err)
}
return host, exists
}
// validateNetwork tries to catch network problems as soon as possible
func validateNetwork(h *host.Host, r command.Runner) string {
ip, err := h.Driver.GetIP()
if err != nil {
exit.WithError("Unable to get VM IP address", err)
}
optSeen := false
warnedOnce := false
for _, k := range proxy.EnvVars {
if v := os.Getenv(k); v != "" {
if !optSeen {
out.T(out.Internet, "Found network options:")
optSeen = true
}
out.T(out.Option, "{{.key}}={{.value}}", out.V{"key": k, "value": v})
ipExcluded := proxy.IsIPExcluded(ip) // Skip warning if minikube ip is already in NO_PROXY
k = strings.ToUpper(k) // for http_proxy & https_proxy
if (k == "HTTP_PROXY" || k == "HTTPS_PROXY") && !ipExcluded && !warnedOnce {
out.WarningT("You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP ({{.ip_address}}). Please see {{.documentation_url}} for more details", out.V{"ip_address": ip, "documentation_url": "https://minikube.sigs.k8s.io/docs/reference/networking/proxy/"})
warnedOnce = true
}
}
}
if !driver.BareMetal(h.Driver.DriverName()) && !driver.IsKIC(h.Driver.DriverName()) {
trySSH(h, ip)
}
tryLookup(r)
tryRegistry(r)
return ip
}
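// trySSH verifies that the host is reachable over SSH, retrying briefly; it is skipped when --force is set.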
func trySSH(h *host.Host, ip string) {
if viper.GetBool("force") {
return
}
sshAddr := net.JoinHostPort(ip, "22")
dial := func() (err error) {
d := net.Dialer{Timeout: 3 * time.Second}
conn, err := d.Dial("tcp", sshAddr)
if err != nil {
out.WarningT("Unable to verify SSH connectivity: {{.error}}. Will retry...", out.V{"error": err})
return err
}
_ = conn.Close()
return nil
}
if err := retry.Expo(dial, time.Second, 13*time.Second); err != nil {
exit.WithCodeT(exit.IO, `minikube is unable to connect to the VM: {{.error}}
This is likely due to one of two reasons:
- VPN or firewall interference
- {{.hypervisor}} network configuration issue
Suggested workarounds:
- Disable your local VPN or firewall software
- Configure your local VPN or firewall to allow access to {{.ip}}
- Restart or reinstall {{.hypervisor}}
- Use an alternative --vm-driver
- Use --force to override this connectivity check
`, out.V{"error": err, "hypervisor": h.Driver.DriverName(), "ip": ip})
}
}
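// tryLookup warns if the node cannot resolve external DNS records.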
func tryLookup(r command.Runner) {
// DNS check
if rr, err := r.RunCmd(exec.Command("nslookup", "kubernetes.io", "-type=ns")); err != nil {
glog.Infof("%s failed: %v which might be okay will retry nslookup without query type", rr.Args, err)
// will retry without the query type for ISOs with different busybox versions.
if _, err = r.RunCmd(exec.Command("nslookup", "kubernetes.io")); err != nil {
glog.Warningf("nslookup failed: %v", err)
out.WarningT("Node may be unable to resolve external DNS records")
}
}
}
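// tryRegistry checks whether the node can reach the configured image repository over HTTPS, honoring HTTPS_PROXY.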
func tryRegistry(r command.Runner) {
// Try an HTTPS connection to the image repository
proxy := os.Getenv("HTTPS_PROXY")
opts := []string{"-sS"}
if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") {
opts = append([]string{"-x", proxy}, opts...)
}
repo := viper.GetString(imageRepository)