multi.go 18.7 KB
Newer Older
Kamil Trzciński's avatar
Kamil Trzciński committed
1
package commands
2 3

import (
4 5
	"errors"
	"fmt"
6 7
	"net"
	"net/http"
8
	"net/http/pprof"
9 10
	"os"
	"os/signal"
11
	"runtime"
12
	"syscall"
13
	"time"
Kamil Trzciński's avatar
Kamil Trzciński committed
14

15
	"github.com/ayufan/golang-kardianos-service"
16 17
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
18
	"github.com/sirupsen/logrus"
19
	"github.com/urfave/cli"
Kamil Trzciński's avatar
Kamil Trzciński committed
20

21 22
	"gitlab.com/gitlab-org/gitlab-runner/common"
	"gitlab.com/gitlab-org/gitlab-runner/helpers"
23
	"gitlab.com/gitlab-org/gitlab-runner/helpers/certificate"
24 25 26
	prometheus_helper "gitlab.com/gitlab-org/gitlab-runner/helpers/prometheus"
	"gitlab.com/gitlab-org/gitlab-runner/helpers/sentry"
	"gitlab.com/gitlab-org/gitlab-runner/helpers/service"
27
	"gitlab.com/gitlab-org/gitlab-runner/log"
28
	"gitlab.com/gitlab-org/gitlab-runner/network"
29
	"gitlab.com/gitlab-org/gitlab-runner/session"
30 31
)

32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
var (
	concurrentDesc = prometheus.NewDesc(
		"gitlab_runner_concurrent",
		"The current value of concurrent setting",
		nil,
		nil,
	)

	limitDesc = prometheus.NewDesc(
		"gitlab_runner_limit",
		"The current value of concurrent setting",
		[]string{"runner"},
		nil,
	)
)

48
// RunCommand implements the `run` CLI command: a long-running service that
// polls GitLab for jobs on every configured runner and executes them with a
// pool of worker goroutines.
type RunCommand struct {
	configOptionsWithListenAddress
	network common.Network
	healthHelper

	// buildsHelper tracks running builds, per-runner counts and sessions.
	buildsHelper buildsHelper

	ServiceName      string `short:"n" long:"service" description:"Use different names for different services"`
	WorkingDirectory string `short:"d" long:"working-directory" description:"Specify custom working directory"`
	User             string `short:"u" long:"user" description:"Use specific user to execute shell scripts"`
	Syslog           bool   `long:"syslog" description:"Log to system service logger" env:"LOG_SYSLOG"`

	sentryLogHook     sentry.LogHook
	prometheusLogHook prometheus_helper.LogHook

	failuresCollector               *prometheus_helper.FailuresCollector
	networkRequestStatusesCollector prometheus.Collector

	sessionServer *session.Server

	// abortBuilds is used to abort running builds
	abortBuilds chan os.Signal

	// runSignal is used to abort current operation (scaling workers, waiting for config)
	runSignal chan os.Signal

	// reloadSignal is used to trigger forceful config reload
	reloadSignal chan os.Signal

	// stopSignals is used to catch the signals notified to the process:
	// SIGTERM, SIGQUIT, Interrupt, Kill
	stopSignals chan os.Signal

	// stopSignal preserves the signal that was used to stop the process.
	// When it is SIGQUIT, running builds and the session server are allowed
	// to finish before exiting.
	stopSignal os.Signal

	// runFinished is used to notify that Run() did finish
	runFinished chan bool

	// currentWorkers is the number of worker goroutines currently running.
	currentWorkers int
}

91 92
func (mr *RunCommand) log() *logrus.Entry {
	return logrus.WithField("builds", mr.buildsHelper.buildsCount())
Kamil Trzciński's avatar
Kamil Trzciński committed
93 94
}

95
func (mr *RunCommand) feedRunner(runner *common.RunnerConfig, runners chan *common.RunnerConfig) {
96 97
	if !mr.isHealthy(runner.UniqueID()) {
		return
Kamil Trzciński's avatar
Kamil Trzciński committed
98 99
	}

100
	runners <- runner
101
}
102

103
func (mr *RunCommand) feedRunners(runners chan *common.RunnerConfig) {
104
	for mr.stopSignal == nil {
105 106
		mr.log().Debugln("Feeding runners to channel")
		config := mr.config
107 108 109

		// If no runners wait full interval to test again
		if len(config.Runners) == 0 {
110
			time.Sleep(config.GetCheckInterval())
111 112 113
			continue
		}

114 115
		interval := config.GetCheckInterval() / time.Duration(len(config.Runners))

116
		// Feed runner with waiting exact amount of time
117 118
		for _, runner := range config.Runners {
			mr.feedRunner(runner, runners)
119
			time.Sleep(interval)
120
		}
Kamil Trzciński's avatar
Kamil Trzciński committed
121
	}
122 123
}

124 125 126 127 128 129 130 131 132 133
// requeueRunner makes a non-blocking attempt to put the runner back on the
// feed channel so another worker can pick it up immediately. If the channel
// is full the runner is dropped here; feedRunners will feed it again later.
func (mr *RunCommand) requeueRunner(runner *common.RunnerConfig, runners chan *common.RunnerConfig) {
	runnerLog := mr.log().WithField("runner", runner.ShortDescription())

	select {
	case runners <- runner:
		runnerLog.Debugln("Requeued the runner")

	default:
		// The message previously ended with a dangling ": " with nothing
		// appended after it.
		runnerLog.Debugln("Failed to requeue the runner")
	}
}

134 135
// requestJob will check if the runner can send another concurrent request to
// GitLab, if not the return value is nil.
136
func (mr *RunCommand) requestJob(runner *common.RunnerConfig, sessionInfo *common.SessionInfo) (common.JobTrace, *common.JobResponse, error) {
137
	if !mr.buildsHelper.acquireRequest(runner) {
138 139
		mr.log().WithField("runner", runner.ShortDescription()).
			Debugln("Failed to request job: runner requestConcurrency meet")
140
		return nil, nil, nil
141 142 143
	}
	defer mr.buildsHelper.releaseRequest(runner)

144
	jobData, healthy := mr.network.RequestJob(*runner, sessionInfo)
145
	mr.makeHealthy(runner.UniqueID(), healthy)
146

147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171
	if jobData == nil {
		return nil, nil, nil
	}

	// Make sure to always close output
	jobCredentials := &common.JobCredentials{
		ID:    jobData.ID,
		Token: jobData.Token,
	}

	trace, err := mr.network.ProcessJob(*runner, jobCredentials)
	if err != nil {
		jobInfo := common.UpdateJobInfo{
			ID:            jobCredentials.ID,
			State:         common.Failed,
			FailureReason: common.RunnerSystemFailure,
		}

		// send failure once
		mr.network.UpdateJob(*runner, jobCredentials, jobInfo)
		return nil, nil, err
	}

	trace.SetFailuresCollector(mr.failuresCollector)
	return trace, jobData, nil
172 173
}

174
// processRunner requests a single job for the given runner and, when one is
// received, runs the build to completion. Executor and build slots are
// acquired up front and released via defers. Once a job has been received,
// the deferred trace handler guarantees its trace is always finalized:
// failed with the error that occurred, or closed cleanly.
func (mr *RunCommand) processRunner(id int, runner *common.RunnerConfig, runners chan *common.RunnerConfig) (err error) {
	provider := common.GetExecutor(runner.Executor)
	if provider == nil {
		// Unknown executor type - nothing we can do for this runner.
		return
	}

	executorData, releaseFn, err := mr.acquireRunnerResources(provider, runner)
	if err != nil {
		return
	}
	defer releaseFn()

	buildSession, sessionInfo, err := mr.createSession(provider)
	if err != nil {
		return
	}

	// Receive a new build
	trace, jobData, err := mr.requestJob(runner, sessionInfo)
	if err != nil || jobData == nil {
		return
	}
	// From this point a job exists; make sure its trace is always closed.
	// Note: uses the named return value `err`, so later failures below are
	// reported through the trace as runner system failures.
	defer func() {
		if err != nil {
			fmt.Fprintln(trace, err.Error())
			trace.Fail(err, common.RunnerSystemFailure)
		} else {
			trace.Fail(nil, common.NoneFailure)
		}
	}()

	// Create a new build
	build, err := common.NewBuild(*jobData, runner, mr.abortBuilds, executorData)
	if err != nil {
		return
	}
	build.Session = buildSession

	// Add build to list of builds to assign numbers
	mr.buildsHelper.addBuild(build)
	defer mr.buildsHelper.removeBuild(build)

	// Process the same runner by different worker again
	// to speed up taking the builds
	mr.requeueRunner(runner, runners)

	// Process a build
	return build.Run(mr.config, trace)
}

224 225 226 227 228 229 230
func (mr *RunCommand) acquireRunnerResources(provider common.ExecutorProvider, runner *common.RunnerConfig) (common.ExecutorData, func(), error) {
	executorData, err := provider.Acquire(runner)
	if err != nil {
		return nil, func() {}, fmt.Errorf("failed to update executor: %v", err)
	}

	if !mr.buildsHelper.acquireBuild(runner) {
231
		provider.Release(runner, executorData)
232 233 234 235 236
		return nil, nil, errors.New("failed to request job, runner limit met")
	}

	releaseFn := func() {
		mr.buildsHelper.releaseBuild(runner)
237
		provider.Release(runner, executorData)
238 239 240 241 242
	}

	return executorData, releaseFn, nil
}

243 244 245 246 247 248 249
func (mr *RunCommand) createSession(provider common.ExecutorProvider) (*session.Session, *common.SessionInfo, error) {
	var features common.FeaturesInfo

	if err := provider.GetFeatures(&features); err != nil {
		return nil, nil, err
	}

250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267
	if mr.sessionServer == nil || !features.Session {
		return nil, nil, nil
	}

	sess, err := session.NewSession(mr.log())
	if err != nil {
		return nil, nil, err
	}

	sessionInfo := &common.SessionInfo{
		URL:           mr.sessionServer.AdvertiseAddress + sess.Endpoint,
		Certificate:   string(mr.sessionServer.CertificatePublicKey),
		Authorization: sess.Token,
	}

	return sess, sessionInfo, err
}

268
// processRunners is the body of a single worker goroutine: it takes runners
// off the feed channel and processes one job for each, until either a stop
// signal has been recorded or a value arrives on stopWorker.
func (mr *RunCommand) processRunners(id int, stopWorker chan bool, runners chan *common.RunnerConfig) {
	mr.log().WithField("worker", id).Debugln("Starting worker")
	for mr.stopSignal == nil {
		select {
		case runner := <-runners:
			err := mr.processRunner(id, runner, runners)
			if err != nil {
				mr.log().WithFields(logrus.Fields{
					"runner":   runner.ShortDescription(),
					"executor": runner.Executor,
				}).WithError(err).
					Error("Failed to process runner")
			}

			// force GC cycle after processing build
			runtime.GC()

		case <-stopWorker:
			mr.log().WithField("worker", id).Debugln("Stopping worker")
			return
		}
	}
	// The loop ended because of a stop signal: still consume one stopWorker
	// token so the worker-count bookkeeping in Run() stays balanced.
	<-stopWorker
}

293
func (mr *RunCommand) startWorkers(startWorker chan int, stopWorker chan bool, runners chan *common.RunnerConfig) {
294
	for mr.stopSignal == nil {
295 296
		id := <-startWorker
		go mr.processRunners(id, stopWorker, runners)
297
	}
298 299
}

300 301
// loadConfig (re)reads the configuration file and applies the parts that
// take effect immediately: logging settings, the script user, health-check
// state and the Sentry hook.
func (mr *RunCommand) loadConfig() error {
	err := mr.configOptions.loadConfig()
	if err != nil {
		return err
	}

	// Set log level
	err = mr.updateLoggingConfiguration()
	if err != nil {
		return err
	}

	// pass user to execute scripts as specific user
	if mr.User != "" {
		mr.config.User = mr.User
	}

	// Drop the recorded health statuses so every runner starts fresh after
	// a reload.
	mr.healthy = nil
	mr.log().Println("Configuration loaded")
	mr.log().Debugln(helpers.ToYAML(mr.config))

	// initialize sentry; a failure here is logged but does not abort the
	// config load.
	if mr.config.SentryDSN != nil {
		var err error
		mr.sentryLogHook, err = sentry.NewLogHook(*mr.config.SentryDSN)
		if err != nil {
			mr.log().WithError(err).Errorln("Sentry failure")
		}
	} else {
		// No DSN configured: install an inert hook.
		mr.sentryLogHook = sentry.LogHook{}
	}

	return nil
}

335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362
// updateLoggingConfiguration applies the log level and format from the
// configuration file, unless either was already forced on the command line.
// The logging subsystem is reloaded only when something actually changed.
func (mr *RunCommand) updateLoggingConfiguration() error {
	logCfg := log.Configuration()
	changed := false

	if mr.config.LogLevel != nil && !logCfg.IsLevelSetWithCli() {
		if err := logCfg.SetLevel(*mr.config.LogLevel); err != nil {
			return err
		}
		changed = true
	}

	if mr.config.LogFormat != nil && !logCfg.IsFormatSetWithCli() {
		if err := logCfg.SetFormat(*mr.config.LogFormat); err != nil {
			return err
		}
		changed = true
	}

	if changed {
		logCfg.ReloadConfiguration()
	}

	return nil
}

363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379
func (mr *RunCommand) checkConfig() (err error) {
	info, err := os.Stat(mr.ConfigFile)
	if err != nil {
		return err
	}

	if !mr.config.ModTime.Before(info.ModTime()) {
		return nil
	}

	err = mr.loadConfig()
	if err != nil {
		mr.log().Errorln("Failed to load config", err)
		// don't reload the same file
		mr.config.ModTime = info.ModTime()
		return
	}
380 381 382
	return nil
}

383
// Start implements service.Service. It initializes the signal channels,
// switches to the configured working directory, loads the configuration and
// then launches the main loop asynchronously, because Start must not block.
func (mr *RunCommand) Start(s service.Service) error {
	mr.abortBuilds = make(chan os.Signal)
	mr.runSignal = make(chan os.Signal, 1)
	mr.reloadSignal = make(chan os.Signal, 1)
	mr.runFinished = make(chan bool, 1)
	mr.stopSignals = make(chan os.Signal)
	mr.log().Println("Starting multi-runner from", mr.ConfigFile, "...")

	userModeWarning(false)

	if len(mr.WorkingDirectory) > 0 {
		err := os.Chdir(mr.WorkingDirectory)
		if err != nil {
			return err
		}
	}

	err := mr.loadConfig()
	if err != nil {
		return err
	}

	// Start should not block. Do the actual work async.
	go mr.Run()

	return nil
}

411
// updateWorkers scales the pool of worker goroutines up or down to match the
// configured `concurrent` value. It returns the signal that interrupted the
// scaling, or nil when scaling finished undisturbed.
func (mr *RunCommand) updateWorkers(workerIndex *int, startWorker chan int, stopWorker chan bool) os.Signal {
	buildLimit := mr.config.Concurrent

	if buildLimit < 1 {
		mr.log().Fatalln("Concurrent is less than 1 - no jobs will be processed")
	}

	// Too many workers: ask the surplus ones to stop. Each send may block
	// until a worker picks it up, so also watch for an interrupt signal.
	for mr.currentWorkers > buildLimit {
		select {
		case stopWorker <- true:
		case signaled := <-mr.runSignal:
			return signaled
		}
		mr.currentWorkers--
	}

	// Too few workers: start new ones, handing out ever-increasing indexes.
	for mr.currentWorkers < buildLimit {
		select {
		case startWorker <- *workerIndex:
		case signaled := <-mr.runSignal:
			return signaled
		}
		mr.currentWorkers++
		*workerIndex++
	}

	return nil
}

func (mr *RunCommand) updateConfig() os.Signal {
	select {
	case <-time.After(common.ReloadConfigInterval * time.Second):
		err := mr.checkConfig()
		if err != nil {
			mr.log().Errorln("Failed to load config", err)
		}

	case <-mr.reloadSignal:
		err := mr.loadConfig()
		if err != nil {
			mr.log().Errorln("Failed to load config", err)
		}

454
	case signaled := <-mr.runSignal:
455 456 457 458 459
		return signaled
	}
	return nil
}

460 461 462 463 464 465 466
// runWait blocks until one of the registered stop signals arrives and
// records it, letting the service machinery proceed to Stop().
func (mr *RunCommand) runWait() {
	mr.log().Debugln("Waiting for stop signal")

	// Save the stop signal and exit to execute Stop()
	mr.stopSignal = <-mr.stopSignals
}

467
// serveMetrics registers all Prometheus collectors on a private registry
// and exposes them on the mux under /metrics.
func (mr *RunCommand) serveMetrics(mux *http.ServeMux) {
	registry := prometheus.NewRegistry()
	// Metrics about the runner's business logic.
	registry.MustRegister(&mr.buildsHelper)
	registry.MustRegister(mr)
	// Metrics about API connections
	registry.MustRegister(mr.networkRequestStatusesCollector)
	// Metrics about jobs failures
	registry.MustRegister(mr.failuresCollector)
	// Metrics about caught errors
	registry.MustRegister(&mr.prometheusLogHook)
	// Metrics about the program's build version.
	registry.MustRegister(common.AppVersion.NewMetricsCollector())
	// Go-specific metrics about the process (GC stats, goroutines, etc.).
	registry.MustRegister(prometheus.NewGoCollector())
	// Go-unrelated process metrics (memory usage, file descriptors, etc.).
	registry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))

	// Register all executor provider collectors
	for _, provider := range common.GetExecutorProviders() {
		if collector, ok := provider.(prometheus.Collector); ok && collector != nil {
			registry.MustRegister(collector)
		}
	}

	mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
}

495
// serveDebugData exposes the list of currently running jobs under
// /debug/jobs/list.
func (mr *RunCommand) serveDebugData(mux *http.ServeMux) {
	mux.HandleFunc("/debug/jobs/list", mr.buildsHelper.ListJobsHandler)
}

499 500 501 502 503 504 505 506
// servePprof wires the standard net/http/pprof profiling handlers into the
// given mux under /debug/pprof/.
func (mr *RunCommand) servePprof(mux *http.ServeMux) {
	handlers := map[string]http.HandlerFunc{
		"/debug/pprof/":        pprof.Index,
		"/debug/pprof/cmdline": pprof.Cmdline,
		"/debug/pprof/profile": pprof.Profile,
		"/debug/pprof/symbol":  pprof.Symbol,
		"/debug/pprof/trace":   pprof.Trace,
	}
	for path, handler := range handlers {
		mux.HandleFunc(path, handler)
	}
}

507
// setupMetricsAndDebugServer starts an HTTP server (when listen_address is
// configured) exposing the Prometheus metrics, job-debug and pprof
// endpoints. An unset address disables the server; an invalid one only logs
// an error, while a failure to bind the listener is fatal.
func (mr *RunCommand) setupMetricsAndDebugServer() {
	listenAddress, err := mr.listenAddress()

	if err != nil {
		mr.log().Errorf("invalid listen address: %s", err.Error())
		return
	}

	if listenAddress == "" {
		mr.log().Info("listen_address not defined, metrics & debug endpoints disabled")
		return
	}

	// We separate out the listener creation here so that we can return an error if
	// the provided address is invalid or there is some other listener error.
	listener, err := net.Listen("tcp", listenAddress)
	if err != nil {
		mr.log().WithError(err).Fatal("Failed to create listener for metrics server")
	}

	mux := http.NewServeMux()

	go func() {
		err := http.Serve(listener, mux)
		if err != nil {
			mr.log().WithError(err).Fatal("Metrics server terminated")
		}
	}()

	mr.serveMetrics(mux)
	mr.serveDebugData(mux)
	mr.servePprof(mux)

	mr.log().
		WithField("address", listenAddress).
		Info("Metrics server listening")
}

545 546
// setupSessionServer starts the interactive-terminal session server when
// [session_server].listen_address is configured; otherwise sessions stay
// disabled. A failure to create or run the server is fatal.
func (mr *RunCommand) setupSessionServer() {
	if mr.config.SessionServer.ListenAddress == "" {
		mr.log().Info("[session_server].listen_address not defined, session endpoints disabled")
		return
	}

	var err error
	mr.sessionServer, err = session.NewServer(
		session.ServerConfig{
			AdvertiseAddress: mr.config.SessionServer.AdvertiseAddress,
			ListenAddress:    mr.config.SessionServer.ListenAddress,
			ShutdownTimeout:  common.ShutdownTimeout * time.Second,
		},
		mr.log(),
		certificate.X509Generator{},
		mr.buildsHelper.findSessionByURL,
	)
	if err != nil {
		mr.log().WithError(err).Fatal("Failed to create session server")
	}

	go func() {
		err := mr.sessionServer.Start()
		if err != nil {
			mr.log().WithError(err).Fatal("Session server terminated")
		}
	}()

	mr.log().
		WithField("address", mr.config.SessionServer.ListenAddress).
		Info("Session server listening")
}

578 579
// Run is the main loop of the runner process: it starts the auxiliary HTTP
// servers, wires up signal handling, keeps the worker pool scaled to the
// configuration and periodically reloads that configuration. It exits once
// a stop signal has been recorded and all workers have been stopped.
func (mr *RunCommand) Run() {
	mr.setupMetricsAndDebugServer()
	mr.setupSessionServer()

	runners := make(chan *common.RunnerConfig)
	go mr.feedRunners(runners)

	signal.Notify(mr.stopSignals, syscall.SIGQUIT, syscall.SIGTERM, os.Interrupt, os.Kill)
	signal.Notify(mr.reloadSignal, syscall.SIGHUP)

	startWorker := make(chan int)
	stopWorker := make(chan bool)
	go mr.startWorkers(startWorker, stopWorker, runners)

	// workerIndex only ever grows; it gives each worker a unique id.
	workerIndex := 0

	for mr.stopSignal == nil {
		signaled := mr.updateWorkers(&workerIndex, startWorker, stopWorker)
		if signaled != nil {
			break
		}

		signaled = mr.updateConfig()
		if signaled != nil {
			break
		}
	}

	// Wait for workers to shutdown
	for mr.currentWorkers > 0 {
		stopWorker <- true
		mr.currentWorkers--
	}
	mr.log().Println("All workers stopped. Can exit now")
	mr.runFinished <- true
}
614

615
// interruptRun endlessly re-sends the recorded stop signal on runSignal so
// every select in the main loop listening on that channel wakes up. It
// never returns; the goroutine dies with the process.
func (mr *RunCommand) interruptRun() {
	// Pump interrupt signal
	for {
		mr.runSignal <- mr.stopSignal
	}
}

// abortAllBuilds endlessly re-sends the recorded stop signal on abortBuilds
// so every running build eventually receives an abort. It never returns;
// the goroutine dies with the process.
func (mr *RunCommand) abortAllBuilds() {
	// Pump signal to abort all current builds
	for {
		mr.abortBuilds <- mr.stopSignal
	}
}
628

629
// handleGracefulShutdown waits for running builds to finish as long as the
// recorded stop signal stays SIGQUIT. It returns nil when everything
// finished, or an error naming the signal that demanded a faster stop.
func (mr *RunCommand) handleGracefulShutdown() error {
	// We wait till we have a SIGQUIT
	for mr.stopSignal == syscall.SIGQUIT {
		mr.log().Warningln("Requested quit, waiting for builds to finish")

		// Wait for other signals to finish builds
		select {
		case mr.stopSignal = <-mr.stopSignals:
		// We received a new signal

		case <-mr.runFinished:
			// Everything finished we can exit now
			return nil
		}
	}

	return fmt.Errorf("received: %v", mr.stopSignal)
}

// handleShutdown performs the forceful stop: all builds are told to abort,
// the session server is closed, and we wait until either the main loop
// finishes, the shutdown timeout expires, or yet another stop signal forces
// an immediate exit.
func (mr *RunCommand) handleShutdown() error {
	mr.log().Warningln("Requested service stop:", mr.stopSignal)

	go mr.abortAllBuilds()

	if mr.sessionServer != nil {
		mr.sessionServer.Close()
	}

	// Wait for graceful shutdown or abort after timeout
	for {
		select {
		case mr.stopSignal = <-mr.stopSignals:
			return fmt.Errorf("forced exit: %v", mr.stopSignal)

		case <-time.After(common.ShutdownTimeout * time.Second):
			return errors.New("shutdown timed out")

		case <-mr.runFinished:
			// Everything finished we can exit now
			return nil
		}
	}
}
Kamil Trzciński's avatar
Kamil Trzciński committed
672

673 674 675 676 677 678 679 680 681 682
// Stop implements service.Service. It first attempts a graceful shutdown
// (honoured for SIGQUIT) and, if that is interrupted, escalates to the
// forceful shutdown path.
func (mr *RunCommand) Stop(s service.Service) error {
	go mr.interruptRun()

	if err := mr.handleGracefulShutdown(); err == nil {
		return nil
	}

	return mr.handleShutdown()
}

683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708
// Describe implements prometheus.Collector. It announces the concurrent and
// per-runner limit gauge descriptors.
func (mr *RunCommand) Describe(ch chan<- *prometheus.Desc) {
	ch <- concurrentDesc
	ch <- limitDesc
}

// Collect implements prometheus.Collector. It emits the current global
// `concurrent` value plus one `limit` gauge per configured runner.
func (mr *RunCommand) Collect(ch chan<- prometheus.Metric) {
	cfg := mr.config

	ch <- prometheus.MustNewConstMetric(
		concurrentDesc,
		prometheus.GaugeValue,
		float64(cfg.Concurrent),
	)

	for _, r := range cfg.Runners {
		ch <- prometheus.MustNewConstMetric(
			limitDesc,
			prometheus.GaugeValue,
			float64(r.Limit),
			r.ShortDescription(),
		)
	}
}

709
// Execute is the CLI entry point for the `run` command. It builds the
// service configuration, installs the logging hooks and hands control to
// the service runner, which in turn calls Start()/Stop() on this command.
func (mr *RunCommand) Execute(context *cli.Context) {
	svcConfig := &service.Config{
		Name:        mr.ServiceName,
		DisplayName: mr.ServiceName,
		Description: defaultDescription,
		Arguments:   []string{"run"},
		Option: service.KeyValue{
			// RunWait blocks the service until a stop signal arrives.
			"RunWait": mr.runWait,
		},
	}

	svc, err := service_helpers.New(mr, svcConfig)
	if err != nil {
		logrus.Fatalln(err)
	}

	if mr.Syslog {
		log.SetSystemLogger(logrus.StandardLogger(), svc)
	}

	logrus.AddHook(&mr.sentryLogHook)
	logrus.AddHook(&mr.prometheusLogHook)

	err = svc.Run()
	if err != nil {
		logrus.Fatalln(err)
	}
}

// init registers the `run` command with a fully wired RunCommand instance.
// The API-request-statuses map is shared between the network client (which
// records request outcomes) and the Prometheus collector (which exposes
// them).
func init() {
	requestStatusesCollector := network.NewAPIRequestStatusesMap()

	common.RegisterCommand2("run", "run multi runner service", &RunCommand{
		ServiceName: defaultServiceName,
		network:     network.NewGitLabClientWithRequestStatusesMap(requestStatusesCollector),
		networkRequestStatusesCollector: requestStatusesCollector,
		prometheusLogHook:               prometheus_helper.NewLogHook(),
		failuresCollector:               prometheus_helper.NewFailuresCollector(),
		buildsHelper:                    newBuildsHelper(),
	})
}