-
I tested with a simple build and it works fine. Could you please let me know how to reproduce the problem, including a sample Dockerfile and buildspec?
-
# syntax=docker/dockerfile:1
FROM node:18-alpine AS base

# Install dependencies only when needed
FROM base AS deps
# libc6-compat: glibc shim some Node native modules expect on musl/Alpine
RUN apk add --no-cache libc6-compat
WORKDIR /app

# Install dependencies based on the preferred package manager.
# Only the lockfiles are copied here so this layer stays cached until they change.
COPY package.json yarn.lock* package-lock.json* pnpm-lock.yaml* ./
RUN \
  if [ -f yarn.lock ]; then yarn --frozen-lockfile; \
  elif [ -f package-lock.json ]; then npm ci; \
  elif [ -f pnpm-lock.yaml ]; then corepack enable && corepack prepare pnpm@latest-8 --activate && pnpm i --frozen-lockfile; \
  else echo "Lockfile not found." && exit 1; \
  fi

# Rebuild the source code only when needed
FROM base AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .

# Next.js collects completely anonymous telemetry data about general usage.
# Learn more here: https://nextjs.org/telemetry
# Uncomment the following line in case you want to disable telemetry during the build.
# ENV NEXT_TELEMETRY_DISABLED=1

RUN yarn build
# If using npm, comment out the line above and use the one below instead.
# RUN npm run build

# Production image: copy all the files and run next
FROM base AS runner
WORKDIR /app

# key=value form; legacy space-separated `ENV key value` is deprecated
ENV NODE_ENV=production

# Create an unprivileged user in a single layer (fixed UID/GID 1001 so
# runtimes enforcing runAsNonRoot can verify it).
RUN addgroup --system --gid 1001 nodejs \
 && adduser --system --uid 1001 nextjs

COPY --from=builder --chown=nextjs:nodejs /app ./

USER nextjs

# Documentation only: the port still has to be published at run time.
EXPOSE 3000
ENV PORT=3000

CMD ["yarn", "start"]
# OneDev build spec: two jobs — build the container image with Kaniko,
# then package and publish the Helm chart to Nexus.
version: 22
jobs:
- name: build
  jobExecutor: hephaistos-01
  steps:
  - !CheckoutStep
    name: checkout
    cloneCredential: !DefaultCredential {}
    withLfs: false
    withSubmodules: false
    condition: ALL_PREVIOUS_STEPS_WERE_SUCCESSFUL
  - !SetBuildVersionStep
    name: set build version
    buildVersion: '@script:builtin:node:determine-project-version@'
    condition: ALL_PREVIOUS_STEPS_WERE_SUCCESSFUL
  - !BuildImageWithKanikoStep
    name: build
    destinations: nexus.private.tld/email-signature-webapp:@build_version@
    condition: ALL_PREVIOUS_STEPS_WERE_SUCCESSFUL
  retryCondition: never
  maxRetries: 3
  retryDelay: 30
  timeout: 3600
- name: helm
  jobExecutor: hephaistos-01
  steps:
  - !CheckoutStep
    name: checkout
    cloneCredential: !DefaultCredential {}
    withLfs: false
    withSubmodules: false
    condition: ALL_PREVIOUS_STEPS_WERE_SUCCESSFUL
  - !SetBuildVersionStep
    name: set build version
    buildVersion: '@script:builtin:node:determine-project-version@'
    condition: ALL_PREVIOUS_STEPS_WERE_SUCCESSFUL
  - !CommandStep
    name: publish
    runInContainer: true
    image: alpine/helm
    interpreter: !DefaultInterpreter
      commands:
      - helm plugin install https://github.com/chartmuseum/helm-push
      - helm repo add --username=admin --password=@secret:nexus-password@ helm-repo https://nexus.private.tld/repository/helm/
      - cd helm && helm package .
      - helm cm-push --username=admin --password=@secret:nexus-password@ --context-path=/repository/helm/ ./*.tgz helm-repo
    useTTY: false
    condition: ALL_PREVIOUS_STEPS_WERE_SUCCESSFUL
  retryCondition: never
  maxRetries: 3
  retryDelay: 30
  timeout: 3600
14:32:19 Step "build" is successful 14:32:19 /onedev-build/command/2.sh: line 25: touch: not found
The last line is where the build gets stuck.
-
How should I populate the project? Or can you attach a workable sample project here?
-
Previous Value Current Value Open
Closed
-
Closing as unable to reproduce. Feel free to reopen with more details.
| Type |
Bug
|
| Priority |
Normal
|
| Assignee | |
| Affected Versions |
8.3.4
|
Issue Votes (0)
Hi @robin the new kaniko builder works like a charm. Thank you for the swift work you did there. I just had one minor issue/bug as the kaniko step exits with
/onedev-build/command/2.sh: line 25: touch: not foundand hangs up after, which results in the pipeline getting stuck in running state.Support other Kubernetes CRIs like containerd (#1402)