package shells

import (
	"errors"
	"fmt"
	"net/url"
	"path"
	"path/filepath"
	"strconv"
	"strings"

	"gitlab.com/gitlab-org/gitlab-runner/cache"
	"gitlab.com/gitlab-org/gitlab-runner/common"
	"gitlab.com/gitlab-org/gitlab-runner/helpers/tls"
)

type AbstractShell struct {
}

func (b *AbstractShell) GetFeatures(features *common.FeaturesInfo) {
	features.Artifacts = true
	features.UploadMultipleArtifacts = true
	features.UploadRawArtifacts = true
	features.Cache = true
	features.Refspecs = true
	features.Masking = true
}

func (b *AbstractShell) writeCdBuildDir(w ShellWriter, info common.ShellScriptInfo) {
	w.Cd(info.Build.FullProjectDir())
}

func (b *AbstractShell) writeExports(w ShellWriter, info common.ShellScriptInfo) {
	for _, variable := range info.Build.GetAllVariables() {
		w.Variable(variable)
	}
}

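// writeGitSSLConfig emits `git config` commands that point Git at the CI TLS
// CA, certificate and key files for the repository host; the extra `where`
// arguments select the config scope (for example "--global" or "-f <file>").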
func (b *AbstractShell) writeGitSSLConfig(w ShellWriter, build *common.Build, where []string) {
	repoURL, err := url.Parse(build.Runner.URL)
	if err != nil {
		w.Warning("git SSL config: Can't parse repository URL. %s", err)
		return
	}

	repoURL.Path = ""
	host := repoURL.String()
	variables := build.GetCITLSVariables()
	args := append([]string{"config"}, where...)

	for variable, config := range map[string]string{
		tls.VariableCAFile:   "sslCAInfo",
		tls.VariableCertFile: "sslCert",
		tls.VariableKeyFile:  "sslKey",
	} {
		if variables.Get(variable) == "" {
			continue
		}

		key := fmt.Sprintf("http.%s.%s", host, config)
		w.Command("git", append(args, key, w.EnvVariableKey(variable))...)
	}

	return
}

func (b *AbstractShell) writeGitCleanup(w ShellWriter, build *common.Build) {
	// Remove .git/{index,shallow,HEAD}.lock files from .git, which can fail the fetch command.
	// These files can be left behind if a previous build was terminated during a git operation.
	w.RmFile(".git/index.lock")
	w.RmFile(".git/shallow.lock")
	w.RmFile(".git/HEAD.lock")

	w.RmFile(".git/hooks/post-checkout")
}

// writeRefspecFetchCmd initializes the repository from a temporary git template,
// removes stale lock files, adds or updates the "origin" remote and fetches the
// refspecs requested by the job, optionally limited by --depth.
func (b *AbstractShell) writeRefspecFetchCmd(w ShellWriter, build *common.Build, projectDir string, gitDir string) {
	depth := build.GitInfo.Depth

	if depth > 0 {
		w.Notice("Fetching changes with git depth set to %d...", depth)
	} else {
		w.Notice("Fetching changes...")
	}

	// initializing
	templateDir := w.MkTmpDir("git-template")
	templateFile := path.Join(templateDir, "config")

	w.Command("git", "config", "-f", templateFile, "fetch.recurseSubmodules", "false")
	if build.IsSharedEnv() {
		b.writeGitSSLConfig(w, build, []string{"-f", templateFile})
	}

	w.Command("git", "init", projectDir, "--template", templateDir)
	w.Cd(projectDir)
	b.writeGitCleanup(w, build)

	// Add `git remote` or update existing
	w.IfCmd("git", "remote", "add", "origin", build.GetRemoteURL())
	w.Notice("Created fresh repository.")
	w.Else()
	w.Command("git", "remote", "set-url", "origin", build.GetRemoteURL())
	w.EndIf()

	fetchArgs := []string{"fetch", "origin", "--prune"}
	fetchArgs = append(fetchArgs, build.GitInfo.Refspecs...)
	if depth > 0 {
		fetchArgs = append(fetchArgs, "--depth", strconv.Itoa(depth))
	}

	w.Command("git", fetchArgs...)
}

func (b *AbstractShell) writeCheckoutCmd(w ShellWriter, build *common.Build) {
	w.Notice("Checking out %s as %s...", build.GitInfo.Sha[0:8], build.GitInfo.Ref)
	w.Command("git", "checkout", "-f", "-q", build.GitInfo.Sha)

	cleanFlags := build.GetGitCleanFlags()
	if len(cleanFlags) > 0 {
		cleanArgs := append([]string{"clean"}, cleanFlags...)
		w.Command("git", cleanArgs...)
	}
}

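// writeSubmoduleUpdateCmd syncs, cleans, resets and updates Git submodules
// (recursively when requested) and pulls their LFS objects when git-lfs is
// available and smudging has not been disabled.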
func (b *AbstractShell) writeSubmoduleUpdateCmd(w ShellWriter, build *common.Build, recursive bool) {
	if recursive {
		w.Notice("Updating/initializing submodules recursively...")
	} else {
		w.Notice("Updating/initializing submodules...")
	}

	// Sync .git/config to .gitmodules in case URL changes (e.g. new build token)
	args := []string{"submodule", "sync"}
	if recursive {
		args = append(args, "--recursive")
	}
	w.Command("git", args...)

	// Update / initialize submodules
	updateArgs := []string{"submodule", "update", "--init"}
	foreachArgs := []string{"submodule", "foreach"}
	if recursive {
		updateArgs = append(updateArgs, "--recursive")
		foreachArgs = append(foreachArgs, "--recursive")
	}

	// Clean changed files in submodules
	// "git submodule update --force" option not supported in Git 1.7.1 (shipped with CentOS 6)
	w.Command("git", append(foreachArgs, "git", "clean", "-ffxd")...)
	w.Command("git", append(foreachArgs, "git", "reset", "--hard")...)
	w.Command("git", updateArgs...)

	if !build.IsLFSSmudgeDisabled() {
		w.IfCmd("git-lfs", "version")
		w.Command("git", append(foreachArgs, "git", "lfs", "pull")...)
		w.EndIf()
	}
}

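// cacheFile deduces the cache key (from userKey, or from the job name and ref)
// and returns it together with the cache archive path relative to the build
// directory; both values are empty when no usable cache location exists.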
func (b *AbstractShell) cacheFile(build *common.Build, userKey string) (key, file string) {
	if build.CacheDir == "" {
		return
	}

	// Deduce cache key
	key = path.Join(build.JobInfo.Name, build.GitInfo.Ref)
	if userKey != "" {
		key = build.GetAllVariables().ExpandValue(userKey)
	}

	// Ignore cache without the key
	if key == "" {
		return
	}

	file = path.Join(build.CacheDir, key, "cache.zip")
	file, err := filepath.Rel(build.BuildDir, file)
	if err != nil {
		return "", ""
	}
	return
}

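// guardRunnerCommand wraps f() in a runtime check for the given runner helper
// command and emits a warning instead when the command is not available.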
func (b *AbstractShell) guardRunnerCommand(w ShellWriter, runnerCommand string, action string, f func()) {
	if runnerCommand == "" {
		w.Warning("%s is not supported by this executor.", action)
		return
	}

	w.IfCmd(runnerCommand, "--version")
	f()
	w.Else()
	w.Warning("Missing %s. %s is disabled.", runnerCommand, action)
	w.EndIf()
}

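// cacheExtractor writes the commands that restore each configured cache entry
// using the runner's cache-extractor helper, honoring the cache pull policy.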
func (b *AbstractShell) cacheExtractor(w ShellWriter, info common.ShellScriptInfo) error {
	for _, cacheOptions := range info.Build.Cache {

		// Create list of files to extract
		archiverArgs := []string{}
		for _, path := range cacheOptions.Paths {
			archiverArgs = append(archiverArgs, "--path", path)
		}

		if cacheOptions.Untracked {
			archiverArgs = append(archiverArgs, "--untracked")
		}

		// Skip restoring cache if no cache is defined
		if len(archiverArgs) < 1 {
			continue
		}

		// Skip extraction if the cache key is empty
		cacheKey, cacheFile := b.cacheFile(info.Build, cacheOptions.Key)
		if cacheKey == "" {
			w.Notice("Skipping cache extraction due to empty cache key")
			continue
		}

		if ok, err := cacheOptions.CheckPolicy(common.CachePolicyPull); err != nil {
			return fmt.Errorf("%s for %s", err, cacheKey)
		} else if !ok {
			w.Notice("Not downloading cache %s due to policy", cacheKey)
			continue
		}

		args := []string{
			"cache-extractor",
			"--file", cacheFile,
			"--timeout", strconv.Itoa(info.Build.GetCacheRequestTimeout()),
		}

		// Generate cache download address
		if url := cache.GetCacheDownloadURL(info.Build, cacheKey); url != nil {
			args = append(args, "--url", url.String())
		}

		// Execute cache-extractor command. Failure is not fatal.
		b.guardRunnerCommand(w, info.RunnerCommand, "Extracting cache", func() {
			w.Notice("Checking cache for %s...", cacheKey)
			w.IfCmdWithOutput(info.RunnerCommand, args...)
			w.Notice("Successfully extracted cache")
			w.Else()
			w.Warning("Failed to extract cache")
			w.EndIf()
		})
	}

	return nil
}

func (b *AbstractShell) downloadArtifacts(w ShellWriter, job common.Dependency, info common.ShellScriptInfo) {
	args := []string{
		"artifacts-downloader",
		"--url",
		info.Build.Runner.URL,
		"--token",
		job.Token,
		"--id",
		strconv.Itoa(job.ID),
	}

	w.Notice("Downloading artifacts for %s (%d)...", job.Name, job.ID)
	w.Command(info.RunnerCommand, args...)
}

func (b *AbstractShell) jobArtifacts(info common.ShellScriptInfo) (otherJobs []common.Dependency) {
	for _, otherJob := range info.Build.Dependencies {
		if otherJob.ArtifactsFile.Filename == "" {
			continue
		}

		otherJobs = append(otherJobs, otherJob)
	}
	return
}

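// downloadAllArtifacts writes download commands for the artifacts of every
// dependency job that produced an artifacts file.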
func (b *AbstractShell) downloadAllArtifacts(w ShellWriter, info common.ShellScriptInfo) {
	otherJobs := b.jobArtifacts(info)
	if len(otherJobs) == 0 {
		return
	}

	b.guardRunnerCommand(w, info.RunnerCommand, "Artifacts downloading", func() {
		for _, otherJob := range otherJobs {
			b.downloadArtifacts(w, otherJob, info)
		}
	})
}

func (b *AbstractShell) writePrepareScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
	return nil
}

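// writeCloneFetchCmds writes the get-sources sequence: it disables LFS smudging
// (unless the user already did), fetches or clones according to the Git
// strategy, and optionally checks out the requested SHA followed by a manual
// `git lfs pull`.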
func (b *AbstractShell) writeCloneFetchCmds(w ShellWriter, info common.ShellScriptInfo) error {
	build := info.Build

	// If LFS smudging was disabled by the user (by setting the GIT_LFS_SKIP_SMUDGE variable
	// when defining the job) we're skipping this step.
	//
	// Otherwise we disable smudging here to prevent memory allocation failures.
	//
	// Please read https://gitlab.com/gitlab-org/gitlab-runner/issues/3366 and
	// https://github.com/git-lfs/git-lfs/issues/3524 for context.
	if !build.IsLFSSmudgeDisabled() {
		w.Variable(common.JobVariable{Key: "GIT_LFS_SKIP_SMUDGE", Value: "1"})
	}

	err := b.handleGetSourcesStrategy(w, build)
	if err != nil {
		return err
	}

	if build.GetGitCheckout() {
		b.writeCheckoutCmd(w, build)

		// If LFS smudging was disabled by the user (by setting the GIT_LFS_SKIP_SMUDGE variable
		// when defining the job) we're skipping this step.
		//
		// Otherwise, because we've disabled LFS smudging above, we now need to manually call
		// `git lfs pull` to fetch and checkout all LFS objects that may be present in
		// the repository.
		//
		// Repositories without LFS objects (and without any LFS metadata) will not be
		// affected by this command.
		//
		// Please read https://gitlab.com/gitlab-org/gitlab-runner/issues/3366 and
		// https://github.com/git-lfs/git-lfs/issues/3524 for context.
		if !build.IsLFSSmudgeDisabled() {
			w.IfCmd("git-lfs", "version")
			w.Command("git", "lfs", "pull")
			w.EmptyLine()
			w.EndIf()
		}
	} else {
		w.Notice("Skipping Git checkout")
	}

	return nil
}

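// handleGetSourcesStrategy dispatches on GIT_STRATEGY: fetch reuses the existing
// project directory, clone removes it first, and none only creates the directory
// without touching the repository.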
func (b *AbstractShell) handleGetSourcesStrategy(w ShellWriter, build *common.Build) error {
	projectDir := build.FullProjectDir()
	gitDir := path.Join(build.FullProjectDir(), ".git")

	switch build.GetGitStrategy() {
	case common.GitFetch:
		b.writeRefspecFetchCmd(w, build, projectDir, gitDir)
	case common.GitClone:
		w.RmDir(projectDir)
		b.writeRefspecFetchCmd(w, build, projectDir, gitDir)
	case common.GitNone:
		w.Notice("Skipping Git repository setup")
		w.MkDir(projectDir)
	default:
		return errors.New("unknown GIT_STRATEGY")
	}

	return nil
}

func (b *AbstractShell) writeSubmoduleUpdateCmds(w ShellWriter, info common.ShellScriptInfo) (err error) {
	build := info.Build

	switch build.GetSubmoduleStrategy() {
	case common.SubmoduleNormal:
		b.writeSubmoduleUpdateCmd(w, build, false)

	case common.SubmoduleRecursive:
		b.writeSubmoduleUpdateCmd(w, build, true)

	case common.SubmoduleNone:
		w.Notice("Skipping Git submodules setup")

	default:
		return errors.New("unknown GIT_SUBMODULE_STRATEGY")
	}

	return nil
}

func (b *AbstractShell) writeGetSourcesScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
	b.writeExports(w, info)

	if !info.Build.IsSharedEnv() {
		b.writeGitSSLConfig(w, info.Build, []string{"--global"})
	}

	if info.PreCloneScript != "" && info.Build.GetGitStrategy() != common.GitNone {
		b.writeCommands(w, info.PreCloneScript)
	}

	if err := b.writeCloneFetchCmds(w, info); err != nil {
		return err
	}

	return b.writeSubmoduleUpdateCmds(w, info)
}

func (b *AbstractShell) writeRestoreCacheScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	// Try to restore from main cache, if not found cache for master
	return b.cacheExtractor(w, info)
}

func (b *AbstractShell) writeDownloadArtifactsScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	// Process all artifacts
	b.downloadAllArtifacts(w, info)
	return nil
}

// Write the given string of commands using the provided ShellWriter object.
func (b *AbstractShell) writeCommands(w ShellWriter, commands ...string) {
	for _, command := range commands {
		command = strings.TrimSpace(command)
		if command != "" {
			lines := strings.SplitN(command, "\n", 2)
			if len(lines) > 1 {
				// TODO: this should be collapsable once we introduce that in GitLab
				w.Notice("$ %s # collapsed multi-line command", lines[0])
			} else {
				w.Notice("$ %s", lines[0])
			}
		} else {
			w.EmptyLine()
		}
		w.Line(command)
		w.CheckForErrors()
	}
}

func (b *AbstractShell) writeUserScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
	var scriptStep *common.Step
	for _, step := range info.Build.Steps {
		if step.Name == common.StepNameScript {
			scriptStep = &step
			break
		}
	}

	if scriptStep == nil {
		return nil
	}

	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	if info.PreBuildScript != "" {
		b.writeCommands(w, info.PreBuildScript)
	}

	b.writeCommands(w, scriptStep.Script...)

	if info.PostBuildScript != "" {
		b.writeCommands(w, info.PostBuildScript)
	}

	return nil
}

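// cacheArchiver writes the commands that archive and upload each configured
// cache entry using the runner's cache-archiver helper, honoring the cache
// push policy.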
func (b *AbstractShell) cacheArchiver(w ShellWriter, info common.ShellScriptInfo) error {
	for _, cacheOptions := range info.Build.Cache {
		// Skip archiving if no cache is defined
		cacheKey, cacheFile := b.cacheFile(info.Build, cacheOptions.Key)
		if cacheKey == "" {
			w.Notice("Skipping cache archiving due to empty cache key")
			continue
		}

		if ok, err := cacheOptions.CheckPolicy(common.CachePolicyPush); err != nil {
			return fmt.Errorf("%s for %s", err, cacheKey)
		} else if !ok {
			w.Notice("Not uploading cache %s due to policy", cacheKey)
			continue
		}

		args := []string{
			"cache-archiver",
			"--file", cacheFile,
			"--timeout", strconv.Itoa(info.Build.GetCacheRequestTimeout()),
		}

		// Create list of files to archive
		archiverArgs := []string{}
		for _, path := range cacheOptions.Paths {
			archiverArgs = append(archiverArgs, "--path", path)
		}

		if cacheOptions.Untracked {
			archiverArgs = append(archiverArgs, "--untracked")
		}

		if len(archiverArgs) < 1 {
			// Skip creating archive
			continue
		}
		args = append(args, archiverArgs...)

		// Generate cache upload address
		if url := cache.GetCacheUploadURL(info.Build, cacheKey); url != nil {
			args = append(args, "--url", url.String())
		}

		// Execute cache-archiver command. Failure is not fatal.
		b.guardRunnerCommand(w, info.RunnerCommand, "Creating cache", func() {
			w.Notice("Creating cache %s...", cacheKey)
			w.IfCmdWithOutput(info.RunnerCommand, args...)
			w.Notice("Created cache")
			w.Else()
			w.Warning("Failed to create cache")
			w.EndIf()
		})
	}

	return nil
}

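// writeUploadArtifact builds the artifacts-uploader invocation (paths, untracked
// flag, name, expiry, format and type) for a single artifact definition and
// writes it guarded by the runner command check.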
func (b *AbstractShell) writeUploadArtifact(w ShellWriter, info common.ShellScriptInfo, artifact common.Artifact) {
	args := []string{
		"artifacts-uploader",
		"--url",
		info.Build.Runner.URL,
		"--token",
		info.Build.Token,
		"--id",
		strconv.Itoa(info.Build.ID),
	}

	// Create list of files to archive
	archiverArgs := []string{}
	for _, path := range artifact.Paths {
		archiverArgs = append(archiverArgs, "--path", path)
	}

	if artifact.Untracked {
		archiverArgs = append(archiverArgs, "--untracked")
	}

	if len(archiverArgs) < 1 {
		// Skip creating archive
		return
	}
	args = append(args, archiverArgs...)

	if artifact.Name != "" {
		args = append(args, "--name", artifact.Name)
	}

	if artifact.ExpireIn != "" {
		args = append(args, "--expire-in", artifact.ExpireIn)
	}

	if artifact.Format != "" {
		args = append(args, "--artifact-format", string(artifact.Format))
	}

	if artifact.Type != "" {
		args = append(args, "--artifact-type", artifact.Type)
	}

	b.guardRunnerCommand(w, info.RunnerCommand, "Uploading artifacts", func() {
		w.Notice("Uploading artifacts...")
		w.Command(info.RunnerCommand, args...)
	})
}

func (b *AbstractShell) writeUploadArtifacts(w ShellWriter, info common.ShellScriptInfo, onSuccess bool) {
	if info.Build.Runner.URL == "" {
		return
	}

	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	for _, artifact := range info.Build.Artifacts {
		if onSuccess {
			if !artifact.When.OnSuccess() {
				continue
			}
		} else {
			if !artifact.When.OnFailure() {
				continue
			}
		}

		b.writeUploadArtifact(w, info, artifact)
	}
}

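// writeAfterScript runs the after_script step, if the job defines a non-empty one.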
func (b *AbstractShell) writeAfterScript(w ShellWriter, info common.ShellScriptInfo) error {
	var afterScriptStep *common.Step
	for _, step := range info.Build.Steps {
		if step.Name == common.StepNameAfterScript {
			afterScriptStep = &step
			break
		}
	}

	if afterScriptStep == nil {
		return nil
	}

	if len(afterScriptStep.Script) == 0 {
		return nil
	}

	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	w.Notice("Running after script...")
	b.writeCommands(w, afterScriptStep.Script...)
	return nil
}

func (b *AbstractShell) writeArchiveCacheScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	// Find cached files and archive them
	return b.cacheArchiver(w, info)
}

func (b *AbstractShell) writeUploadArtifactsOnSuccessScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
	b.writeUploadArtifacts(w, info, true)
	return
}

func (b *AbstractShell) writeUploadArtifactsOnFailureScript(w ShellWriter, info common.ShellScriptInfo) (err error) {
	b.writeUploadArtifacts(w, info, false)
	return
}

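// writeScript dispatches the given build stage to the matching write* method.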
func (b *AbstractShell) writeScript(w ShellWriter, buildStage common.BuildStage, info common.ShellScriptInfo) error {
	methods := map[common.BuildStage]func(ShellWriter, common.ShellScriptInfo) error{
		common.BuildStagePrepare:                  b.writePrepareScript,
		common.BuildStageGetSources:               b.writeGetSourcesScript,
		common.BuildStageRestoreCache:             b.writeRestoreCacheScript,
		common.BuildStageDownloadArtifacts:        b.writeDownloadArtifactsScript,
		common.BuildStageUserScript:               b.writeUserScript,
		common.BuildStageAfterScript:              b.writeAfterScript,
		common.BuildStageArchiveCache:             b.writeArchiveCacheScript,
		common.BuildStageUploadOnSuccessArtifacts: b.writeUploadArtifactsOnSuccessScript,
		common.BuildStageUploadOnFailureArtifacts: b.writeUploadArtifactsOnFailureScript,
	}

	fn := methods[buildStage]
	if fn == nil {
		return errors.New("Not supported script type: " + string(buildStage))
	}

	return fn(w, info)
}