Skip to content

Commit c739d3f

Browse files
authored
Merge pull request #7928 from sharifelgamal/mem
improve auto-select memory for multinode clusters
2 parents 6f6ac3a + 90cd6c3 commit c739d3f

File tree

6 files changed

+41
-22
lines changed

6 files changed

+41
-22
lines changed

cmd/minikube/cmd/node_add.go

+6
Original file line number | Diff line number | Diff line change
@@ -18,6 +18,7 @@ package cmd
1818

1919
import (
2020
"github.com/spf13/cobra"
21+
"github.com/spf13/viper"
2122
"k8s.io/minikube/pkg/minikube/config"
2223
"k8s.io/minikube/pkg/minikube/driver"
2324
"k8s.io/minikube/pkg/minikube/exit"
@@ -54,6 +55,11 @@ var nodeAddCmd = &cobra.Command{
5455
KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
5556
}
5657

58+
// Make sure to decrease the default amount of memory we use per VM if this is the first worker node
59+
if len(cc.Nodes) == 1 && viper.GetString(memory) == "" {
60+
cc.Memory = 2200
61+
}
62+
5763
if err := node.Add(cc, n); err != nil {
5864
_, err := maybeDeleteAndRetry(*cc, n, nil, err)
5965
if err != nil {

cmd/minikube/cmd/start.go

+5-1
Original file line number | Diff line number | Diff line change
@@ -715,7 +715,7 @@ func memoryLimits(drvName string) (int, int, error) {
715715
}
716716

717717
// suggestMemoryAllocation calculates the default memory footprint in MB
718-
func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
718+
func suggestMemoryAllocation(sysLimit int, containerLimit int, nodes int) int {
719719
if mem := viper.GetInt(memory); mem != 0 {
720720
return mem
721721
}
@@ -737,6 +737,10 @@ func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
737737
// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
738738
suggested := int(float32(sysLimit)/400.0) * 100
739739

740+
if nodes > 1 {
741+
suggested /= nodes
742+
}
743+
740744
if suggested > maximum {
741745
return maximum
742746
}

cmd/minikube/cmd/start_flags.go

+1-1
Original file line number | Diff line number | Diff line change
@@ -220,7 +220,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
220220
glog.Warningf("Unable to query memory limits: %v", err)
221221
}
222222

223-
mem := suggestMemoryAllocation(sysLimit, containerLimit)
223+
mem := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
224224
if cmd.Flags().Changed(memory) {
225225
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
226226
if err != nil {

cmd/minikube/cmd/start_test.go

+23-14
Original file line number | Diff line number | Diff line change
@@ -185,25 +185,34 @@ func TestSuggestMemoryAllocation(t *testing.T) {
185185
description string
186186
sysLimit int
187187
containerLimit int
188+
nodes int
188189
want int
189190
}{
190-
{"128GB sys", 128000, 0, 6000},
191-
{"64GB sys", 64000, 0, 6000},
192-
{"16GB sys", 16384, 0, 4000},
193-
{"odd sys", 14567, 0, 3600},
194-
{"4GB sys", 4096, 0, 2200},
195-
{"2GB sys", 2048, 0, 2048},
196-
{"Unable to poll sys", 0, 0, 2200},
197-
{"128GB sys, 16GB container", 128000, 16384, 16336},
198-
{"64GB sys, 16GB container", 64000, 16384, 16000},
199-
{"16GB sys, 4GB container", 16384, 4096, 4000},
200-
{"4GB sys, 3.5GB container", 16384, 3500, 3452},
201-
{"2GB sys, 2GB container", 16384, 2048, 2048},
202-
{"2GB sys, unable to poll container", 16384, 0, 4000},
191+
{"128GB sys", 128000, 0, 1, 6000},
192+
{"64GB sys", 64000, 0, 1, 6000},
193+
{"32GB sys", 32768, 0, 1, 6000},
194+
{"16GB sys", 16384, 0, 1, 4000},
195+
{"odd sys", 14567, 0, 1, 3600},
196+
{"4GB sys", 4096, 0, 1, 2200},
197+
{"2GB sys", 2048, 0, 1, 2048},
198+
{"Unable to poll sys", 0, 0, 1, 2200},
199+
{"128GB sys, 16GB container", 128000, 16384, 1, 16336},
200+
{"64GB sys, 16GB container", 64000, 16384, 1, 16000},
201+
{"16GB sys, 4GB container", 16384, 4096, 1, 4000},
202+
{"4GB sys, 3.5GB container", 16384, 3500, 1, 3452},
203+
{"16GB sys, 2GB container", 16384, 2048, 1, 2048},
204+
{"16GB sys, unable to poll container", 16384, 0, 1, 4000},
205+
{"128GB sys 2 nodes", 128000, 0, 2, 6000},
206+
{"8GB sys 3 nodes", 8192, 0, 3, 2200},
207+
{"16GB sys 2 nodes", 16384, 0, 2, 2200},
208+
{"32GB sys 2 nodes", 32768, 0, 2, 4050},
209+
{"odd sys 2 nodes", 14567, 0, 2, 2200},
210+
{"4GB sys 2 nodes", 4096, 0, 2, 2200},
211+
{"2GB sys 3 nodes", 2048, 0, 3, 2048},
203212
}
204213
for _, test := range tests {
205214
t.Run(test.description, func(t *testing.T) {
206-
got := suggestMemoryAllocation(test.sysLimit, test.containerLimit)
215+
got := suggestMemoryAllocation(test.sysLimit, test.containerLimit, test.nodes)
207216
if got != test.want {
208217
t.Errorf("defaultMemorySize(sys=%d, container=%d) = %d, want: %d", test.sysLimit, test.containerLimit, got, test.want)
209218
}

pkg/minikube/node/start.go

+2-1
Original file line number | Diff line number | Diff line change
@@ -239,7 +239,8 @@ func configureRuntimes(runner cruntime.CommandRunner, cc config.ClusterConfig, k
239239
disableOthers = false
240240
}
241241

242-
// Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere.
242+
// Preload is overly invasive for bare metal, and caching is not meaningful.
243+
// KIC handles preload elsewhere.
243244
if driver.IsVM(cc.Driver) {
244245
if err := cr.Preload(cc.KubernetesConfig); err != nil {
245246
switch err.(type) {

test/integration/multinode_test.go

+4-5
Original file line number | Diff line number | Diff line change
@@ -29,7 +29,6 @@ func TestMultiNode(t *testing.T) {
2929
if NoneDriver() {
3030
t.Skip("none driver does not support multinode")
3131
}
32-
MaybeParallel(t)
3332

3433
type validatorFunc func(context.Context, *testing.T, string)
3534
profile := UniqueProfileName("multinode")
@@ -65,7 +64,7 @@ func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) {
6564
}
6665

6766
// Make sure minikube status shows 2 nodes
68-
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
67+
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
6968
if err != nil {
7069
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
7170
}
@@ -89,7 +88,7 @@ func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile strin
8988
}
9089

9190
// Make sure minikube status shows 3 nodes
92-
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
91+
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
9392
if err != nil {
9493
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
9594
}
@@ -121,7 +120,7 @@ func validateStopRunningNode(ctx context.Context, t *testing.T, profile string)
121120
}
122121

123122
// Make sure minikube status shows 2 running nodes and 1 stopped one
124-
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
123+
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
125124
if err != nil && rr.ExitCode != 7 {
126125
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
127126
}
@@ -177,7 +176,7 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile
177176
}
178177

179178
// Make sure status is back down to 2 hosts
180-
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
179+
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr"))
181180
if err != nil {
182181
t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
183182
}

0 commit comments

Comments (0)