Description
We're using GitLab and GitLab CI pipelines in our organisation. There we defined a linter job which uses the MegaLinter image provided by this repository. The jobs are executed by GitLab runners of type kubernetes executor, which spawn a pod that then runs the aforementioned job.
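For reference, the job definition looks roughly like the sketch below. It follows the documented GitLab CI setup for MegaLinter; the job name, stage, image tag, and runner tag are placeholders rather than our exact configuration.

```yaml
# Simplified linter job; names, tags, and versions are placeholders
megalinter:
  stage: test
  image: oxsecurity/megalinter:v7.4.0   # MegaLinter image provided by this repository
  script:
    - "true"                            # the image's entrypoint runs MegaLinter itself
  variables:
    DEFAULT_WORKSPACE: $CI_PROJECT_DIR  # lint the checked-out project
  tags:
    - openshift                         # routed to a runner using the Kubernetes executor
  artifacts:
    when: always
    paths:
      - megalinter-reports
```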
Since v7.4.0, however, we can't run those jobs anymore, because the pod that executes the job can no longer be created successfully. It exits with the following error message:
Log Output
fatal error: unexpected signal during runtime execution
[signal SIGSEGV: segmentation violation code=0x1 addr=0x3c0d0 pc=0x3c0d0]
runtime stack:
runtime.throw({0x5586b5e7d789?, 0x7fffd3bf2f80?})
/usr/lib/golang/src/runtime/panic.go:992 +0x71 fp=0x7fffd3bf27c8 sp=0x7fffd3bf2798 pc=0x5586b5a9c291
runtime.sigpanic()
/usr/lib/golang/src/runtime/signal_unix.go:802 +0x389 fp=0x7fffd3bf2818 sp=0x7fffd3bf27c8 pc=0x5586b5ab1be9
goroutine 1 [syscall, locked to thread]:
runtime.cgocall(0x5586b5e5ee30, 0xc0000dd5c8)
/usr/lib/golang/src/runtime/cgocall.go:157 +0x5c fp=0xc0000dd5a0 sp=0xc0000dd568 pc=0x5586b5a6bc5c
crypto/internal/boring._Cfunc__goboringcrypto_DLOPEN_OPENSSL()
_cgo_gotypes.go:304 +0x4d fp=0xc0000dd5c8 sp=0xc0000dd5a0 pc=0x5586b5ca270d
crypto/internal/boring.init.0()
/usr/lib/golang/src/crypto/internal/boring/boring.go:52 +0x45 fp=0xc0000dd600 sp=0xc0000dd5c8 pc=0x5586b5ca3825
runtime.doInit(0x5586b64c3c00)
/usr/lib/golang/src/runtime/proc.go:6230 +0x128 fp=0xc0000dd730 sp=0xc0000dd600 pc=0x5586b5aab8e8
runtime.doInit(0x5586b64c0e60)
/usr/lib/golang/src/runtime/proc.go:6207 +0x71 fp=0xc0000dd860 sp=0xc0000dd730 pc=0x5586b5aab831
runtime.doInit(0x5586b64c4d40)
/usr/lib/golang/src/runtime/proc.go:6207 +0x71 fp=0xc0000dd990 sp=0xc0000dd860 pc=0x5586b5aab831
runtime.doInit(0x5586b64c5bc0)
/usr/lib/golang/src/runtime/proc.go:6207 +0x71 fp=0xc0000ddac0 sp=0xc0000dd990 pc=0x5586b5aab831
runtime.doInit(0x5586b64c36c0)
/usr/lib/golang/src/runtime/proc.go:6207 +0x71 fp=0xc0000ddbf0 sp=0xc0000ddac0 pc=0x5586b5aab831
runtime.doInit(0x5586b64c3ca0)
/usr/lib/golang/src/runtime/proc.go:6207 +0x71 fp=0xc0000ddd20 sp=0xc0000ddbf0 pc=0x5586b5aab831
runtime.doInit(0x5586b64c7aa0)
/usr/lib/golang/src/runtime/proc.go:6207 +0x71 fp=0xc0000dde50 sp=0xc0000ddd20 pc=0x5586b5aab831
runtime.doInit(0x5586b64c72a0)
/usr/lib/golang/src/runtime/proc.go:6207 +0x71 fp=0xc0000ddf80 sp=0xc0000dde50 pc=0x5586b5aab831
runtime.main()
/usr/lib/golang/src/runtime/proc.go:233 +0x1d4 fp=0xc0000ddfe0 sp=0xc0000ddf80 pc=0x5586b5a9e974
runtime.goexit()
/usr/lib/golang/src/runtime/asm_amd64.s:1571 +0x1 fp=0xc0000ddfe8 sp=0xc0000ddfe0 pc=0x5586b5acc581
goroutine 2 [force gc (idle)]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
/usr/lib/golang/src/runtime/proc.go:361 +0xd6 fp=0xc00005cfb0 sp=0xc00005cf90 pc=0x5586b5a9ed76
runtime.goparkunlock(...)
/usr/lib/golang/src/runtime/proc.go:367
runtime.forcegchelper()
/usr/lib/golang/src/runtime/proc.go:301 +0xad fp=0xc00005cfe0 sp=0xc00005cfb0 pc=0x5586b5a9ec0d
runtime.goexit()
/usr/lib/golang/src/runtime/asm_amd64.s:1571 +0x1 fp=0xc00005cfe8 sp=0xc00005cfe0 pc=0x5586b5acc581
created by runtime.init.7
/usr/lib/golang/src/runtime/proc.go:289 +0x25
goroutine 3 [GC sweep wait]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
/usr/lib/golang/src/runtime/proc.go:361 +0xd6 fp=0xc00005d790 sp=0xc00005d770 pc=0x5586b5a9ed76
runtime.goparkunlock(...)
/usr/lib/golang/src/runtime/proc.go:367
runtime.bgsweep(0x0?)
/usr/lib/golang/src/runtime/mgcsweep.go:278 +0x8e fp=0xc00005d7c8 sp=0xc00005d790 pc=0x5586b5a8bbae
runtime.gcenable.func1()
/usr/lib/golang/src/runtime/mgc.go:177 +0x26 fp=0xc00005d7e0 sp=0xc00005d7c8 pc=0x5586b5a81766
runtime.goexit()
/usr/lib/golang/src/runtime/asm_amd64.s:1571 +0x1 fp=0xc00005d7e8 sp=0xc00005d7e0 pc=0x5586b5acc581
created by runtime.gcenable
/usr/lib/golang/src/runtime/mgc.go:177 +0x6b
goroutine 4 [GC scavenge wait]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
/usr/lib/golang/src/runtime/proc.go:361 +0xd6 fp=0xc00005df20 sp=0xc00005df00 pc=0x5586b5a9ed76
runtime.goparkunlock(...)
/usr/lib/golang/src/runtime/proc.go:367
runtime.bgscavenge(0x0?)
/usr/lib/golang/src/runtime/mgcscavenge.go:272 +0xec fp=0xc00005dfc8 sp=0xc00005df20 pc=0x5586b5a8984c
runtime.gcenable.func2()
/usr/lib/golang/src/runtime/mgc.go:178 +0x26 fp=0xc00005dfe0 sp=0xc00005dfc8 pc=0x5586b5a81706
runtime.goexit()
/usr/lib/golang/src/runtime/asm_amd64.s:1571 +0x1 fp=0xc00005dfe8 sp=0xc00005dfe0 pc=0x5586b5acc581
created by runtime.gcenable
/usr/lib/golang/src/runtime/mgc.go:178 +0xaa
goroutine 18 [finalizer wait]:
runtime.gopark(0xc0000924e0?, 0x0?, 0x70?, 0xc7?, 0x5586b5aab831?)
/usr/lib/golang/src/runtime/proc.go:361 +0xd6 fp=0xc00005c630 sp=0xc00005c610 pc=0x5586b5a9ed76
runtime.goparkunlock(...)
/usr/lib/golang/src/runtime/proc.go:367
runtime.runfinq()
/usr/lib/golang/src/runtime/mfinal.go:177 +0xb3 fp=0xc00005c7e0 sp=0xc00005c630 pc=0x5586b5a80813
runtime.goexit()
/usr/lib/golang/src/runtime/asm_amd64.s:1571 +0x1 fp=0xc00005c7e8 sp=0xc00005c7e0 pc=0x5586b5acc581
created by runtime.createfing
/usr/lib/golang/src/runtime/mfinal.go:157 +0x45
time="[202](XXX#L202)3-11-30T14:54:52Z" level=error msg="exec failed: unable to start container process: read init-p: connection reset by peer"
On AWS, with GitLab runners of type docker executor running on EC2 instances, it still works as expected. On AWS in an EKS cluster with runners of type kubernetes executor it also works.
The stack trace shows the crash happening during process startup, while a Go binary's crypto/internal/boring init tries to dlopen OpenSSL (see crypto/internal/boring.init.0 in the trace above). I therefore suspect a problem with the underlying container runtime, which is docker for the docker executors, containerd for EKS, and cri-o for OpenShift. It could also be permission-related, as a lot of capabilities are prohibited in our OpenShift clusters. However, we haven't changed those restrictions, and past versions of the image still work.
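For context, those restrictions roughly amount to the job pod running with a security context like the one below. This is only an illustrative sketch of what a restricted OpenShift SCC typically enforces; the exact values are assumptions, not copied from our cluster.

```yaml
# Illustrative only: approximate effect of a restricted SCC on the job pod
apiVersion: v1
kind: Pod
metadata:
  name: megalinter-build-pod            # placeholder name
spec:
  containers:
    - name: build
      image: oxsecurity/megalinter:v7.4.0
      securityContext:
        runAsNonRoot: true
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL                        # most capabilities are prohibited
        seccompProfile:
          type: RuntimeDefault
```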
This happens only from v7.4.0 onward; all earlier versions work just fine.
To Reproduce
Steps to reproduce the behavior:
- Set up a GitLab project and create a job that runs the current MegaLinter image (see the example job definition above)
- Use a GitLab k8s executor runner in OpenShift to run it
- Watch the log output
- See the error
Expected behavior
The image works as it did before on all container runtimes.