mirror of https://github.com/rharkor/caching-for-turbo.git synced 2025-06-08 01:37:01 +09:00
Commit 45686dce54 (parent f7d1916e3c) by rharkor, 2025-04-10 15:01:47 +02:00
12 changed files with 48965 additions and 82 deletions


@@ -107,3 +107,42 @@ jobs:
- name: Test build cache (full cache)
run: ./check-full-turbo.sh
test-action-s3:
name: GitHub Actions Test (S3)
runs-on: ubuntu-latest
steps:
- name: Checkout
id: checkout
uses: actions/checkout@v4
- name: Setup Node.js
id: setup-node
uses: actions/setup-node@v4
with:
node-version-file: .node-version
cache: npm
- name: Install Dependencies
id: npm-ci
run: npm ci
- name: Test Local Action
uses: ./
with:
# Use a unique cache prefix for each pipeline & job
cache-prefix:
${{ runner.os }}-${{ github.run_id }}-${{ github.run_number }}-${{
runner.os }}
provider: s3
s3-access-key-id: ${{ secrets.S3_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_SECRET_ACCESS_KEY }}
s3-bucket: ${{ secrets.S3_BUCKET }}
s3-region: ${{ secrets.S3_REGION }}
- name: Test build cache
run: npm run test
- name: Test build cache (full cache)
run: ./check-full-turbo.sh


@@ -12,7 +12,7 @@ branding:
# Define your inputs here.
inputs:
provider:
description: 'Provider to use for caching (github)'
description: 'Provider to use for caching (github, s3)'
required: true
default: 'github'
cache-prefix:
@@ -34,6 +34,22 @@ inputs:
'Cleanup oldest cache files when total size exceeds this limit (ex: 100mb,
10gb)'
required: false
s3-access-key-id:
description: 'AWS S3 access key ID (required when provider is s3)'
required: false
s3-secret-access-key:
description: 'AWS S3 secret access key (required when provider is s3)'
required: false
s3-bucket:
description: 'AWS S3 bucket name (required when provider is s3)'
required: false
s3-region:
description: 'AWS S3 region (required when provider is s3)'
required: false
s3-prefix:
description: 'Prefix for S3 objects'
required: false
default: 'turbogha/'
runs:
using: node20

dist/setup/index.js (generated, vendored), 32554 changed lines: diff suppressed because one or more lines are too long
dist/setup/index.js.map (generated, vendored), 2 changed lines: diff suppressed because one or more lines are too long
dist/setup/licenses.txt (generated, vendored), 14537 changed lines: diff suppressed because it is too large
package-lock.json (generated), 1659 changed lines: diff suppressed because it is too large


@@ -38,6 +38,8 @@
"dependencies": {
"@actions/cache": "^4.0.0",
"@actions/core": "^1.10.1",
"@aws-sdk/client-s3": "^3.0.0",
"@aws-sdk/lib-storage": "^3.0.0",
"fastify": "^5.0.0",
"parse-duration": "^2.1.4",
"stream-to-promise": "^3.0.0",


@@ -2,8 +2,8 @@ import * as core from '@actions/core'
import { Readable } from 'stream'
import { TListFile } from './server/cleanup'
import { RequestContext } from './server'
import { getGithubProvider } from './cache'
import { getGithubProvider } from './providers/cache'
import { getS3Provider } from './providers/s3'
export type TProvider = {
save: (
ctx: RequestContext,
@@ -17,7 +17,7 @@ export type TProvider = {
) => Promise<
[number | undefined, Readable | ReadableStream, string | undefined] | null
>
delete: () => Promise<void>
delete: (hash: string) => Promise<void>
list: () => Promise<TListFile[]>
}
@@ -26,6 +26,9 @@ export const getProvider = (): TProvider => {
if (provider === 'github') {
return getGithubProvider()
}
if (provider === 's3') {
return getS3Provider()
}
throw new Error(`Provider ${provider} not supported`)
}
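
For orientation, the TProvider contract above (save, get, delete, list) is what any new backend has to satisfy. The following is a minimal sketch of a hypothetical filesystem-backed provider written against that contract; it is not part of this commit, and names such as getFsProvider and cacheDir, as well as the import paths, are illustrative assumptions.

// Illustrative only: a hypothetical filesystem provider against the TProvider
// contract shown above. Assumes cacheDir already exists.
import { createReadStream, createWriteStream } from 'node:fs'
import { readdir, stat, unlink } from 'node:fs/promises'
import { join } from 'node:path'
import { Readable } from 'node:stream'
import { pipeline } from 'node:stream/promises'
import { TProvider } from 'src/lib/providers' // same import style as the S3 provider below
import { RequestContext } from 'src/lib/server' // assumed path
import { TListFile } from 'src/lib/server/cleanup' // assumed path

const cacheDir = process.env.TURBOGHA_FS_CACHE_DIR || '/tmp/turbogha-fs-cache' // assumed location

export const getFsProvider = (): TProvider => ({
  async save(ctx: RequestContext, hash: string, tag: string, stream: Readable) {
    // Mirror the hash#tag key scheme used by the S3 provider.
    const file = join(cacheDir, tag ? `${hash}#${tag}` : hash)
    await pipeline(stream, createWriteStream(file))
    ctx.log.info(`Saved artifact to ${file}`)
  },
  async get(ctx: RequestContext, hash: string) {
    const file = join(cacheDir, hash)
    try {
      const { size } = await stat(file)
      return [size, createReadStream(file), undefined] as [number, Readable, undefined]
    } catch {
      ctx.log.info(`No cached artifact found for ${hash}`)
      return null
    }
  },
  async delete(hash: string) {
    await unlink(join(cacheDir, hash))
  },
  async list(): Promise<TListFile[]> {
    const names = await readdir(cacheDir)
    return Promise.all(
      names.map(async name => {
        const s = await stat(join(cacheDir, name))
        return { path: name, createdAt: s.mtime.toISOString(), size: s.size }
      })
    )
  }
})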


@@ -1,5 +1,5 @@
import { Readable } from 'node:stream'
import { env } from '../env'
import { env } from '../../env'
import { pipeline } from 'node:stream/promises'
import {
createReadStream,
@@ -7,12 +7,12 @@ import {
existsSync,
statSync
} from 'node:fs'
import { getCacheKey, getFsCachePath, getTempCachePath } from '../constants'
import { RequestContext } from '../server'
import { getCacheKey, getFsCachePath, getTempCachePath } from '../../constants'
import { RequestContext } from '../../server'
import * as core from '@actions/core'
import { TListFile } from '../server/cleanup'
import { TListFile } from '../../server/cleanup'
import { getCacheClient } from './utils'
import { TProvider } from '../providers'
import { TProvider } from '../../providers'
//* Cache API
export async function saveCache(


@@ -1,10 +1,10 @@
import { Readable } from 'node:stream'
import { env } from '../env'
import { env } from '../../env'
import * as core from '@actions/core'
import streamToPromise from 'stream-to-promise'
import { createWriteStream } from 'node:fs'
import { unlink } from 'node:fs/promises'
import { getTempCachePath } from '../constants'
import { getTempCachePath } from '../../constants'
import { restoreCache, saveCache } from '@actions/cache'
class HandledError extends Error {
status: number


@@ -0,0 +1,205 @@
import { TProvider } from 'src/lib/providers'
import * as core from '@actions/core'
import { Readable } from 'stream'
import { RequestContext } from '../../server'
import { TListFile } from '../../server/cleanup'
import {
S3Client,
GetObjectCommand,
DeleteObjectCommand,
ListObjectsV2Command
} from '@aws-sdk/client-s3'
import { Upload } from '@aws-sdk/lib-storage'
export const getS3Provider = (): TProvider => {
const s3AccessKeyId = core.getInput('s3-access-key-id')
const s3SecretAccessKey = core.getInput('s3-secret-access-key')
const s3Bucket = core.getInput('s3-bucket')
const s3Region = core.getInput('s3-region')
const s3Prefix = core.getInput('s3-prefix') || 'turbogha/'
if (!s3AccessKeyId || !s3SecretAccessKey || !s3Bucket || !s3Region) {
throw new Error(
'S3 provider requires s3-access-key-id, s3-secret-access-key, s3-bucket, and s3-region inputs'
)
}
const s3Client = new S3Client({
region: s3Region,
credentials: {
accessKeyId: s3AccessKeyId,
secretAccessKey: s3SecretAccessKey
}
})
const getObjectKey = (hash: string, tag?: string): string => {
if (tag) {
return `${s3Prefix}${hash}#${tag}`
}
return `${s3Prefix}${hash}`
}
const save = async (
ctx: RequestContext,
hash: string,
tag: string,
stream: Readable
): Promise<void> => {
const objectKey = getObjectKey(hash, tag)
try {
// Use the S3 Upload utility which handles multipart uploads for large files
const upload = new Upload({
client: s3Client,
params: {
Bucket: s3Bucket,
Key: objectKey,
Body: stream,
ContentType: 'application/octet-stream'
}
})
await upload.done()
ctx.log.info(`Saved artifact to S3: ${objectKey}`)
} catch (error) {
ctx.log.info(`Error saving artifact to S3: ${error}`)
throw error
}
}
const get = async (
ctx: RequestContext,
hash: string
): Promise<
[number | undefined, Readable | ReadableStream, string | undefined] | null
> => {
// First try to get with just the hash
const objectKey = getObjectKey(hash)
try {
// Try to find the object
const listCommand = new ListObjectsV2Command({
Bucket: s3Bucket,
Prefix: objectKey,
MaxKeys: 10
})
const listResponse = await s3Client.send(listCommand)
if (!listResponse.Contents || listResponse.Contents.length === 0) {
ctx.log.info(`No cached artifact found for ${hash}`)
return null
}
// Find the most recent object that matches the hash prefix
const matchingObjects = listResponse.Contents.filter(
obj => obj.Key && obj.Key.startsWith(objectKey)
)
if (matchingObjects.length === 0) {
return null
}
// Sort by last modified date, newest first
matchingObjects.sort((a, b) => {
const dateA = a.LastModified?.getTime() || 0
const dateB = b.LastModified?.getTime() || 0
return dateB - dateA
})
const latestObject = matchingObjects[0]
const key = latestObject.Key as string
// Get the object
const getCommand = new GetObjectCommand({
Bucket: s3Bucket,
Key: key
})
const response = await s3Client.send(getCommand)
if (!response.Body) {
ctx.log.info(`Failed to get artifact body from S3`)
return null
}
const size = response.ContentLength
const stream = response.Body as Readable
// Extract the tag if it exists
let artifactTag: string | undefined
if (key.includes('#')) {
const parts = key.split('#')
artifactTag = parts[parts.length - 1]
}
ctx.log.info(`Retrieved artifact from S3: ${key}`)
return [size, stream, artifactTag]
} catch (error) {
ctx.log.info(`Error getting artifact from S3: ${error}`)
return null
}
}
const deleteObj = async (hash: string): Promise<void> => {
try {
const deleteCommand = new DeleteObjectCommand({
Bucket: s3Bucket,
Key: hash.startsWith(s3Prefix) ? hash : getObjectKey(hash)
})
await s3Client.send(deleteCommand)
} catch (error) {
core.error(`Error deleting artifact from S3: ${error}`)
throw error
}
}
const list = async (): Promise<TListFile[]> => {
try {
const listCommand = new ListObjectsV2Command({
Bucket: s3Bucket,
Prefix: s3Prefix
})
const files: TListFile[] = []
let continuationToken: string | undefined
do {
if (continuationToken) {
listCommand.input.ContinuationToken = continuationToken
}
const response = await s3Client.send(listCommand)
if (response.Contents) {
const objects = response.Contents.filter(obj => obj.Key).map(
(obj): TListFile => {
return {
path: obj.Key as string,
createdAt: (obj.LastModified || new Date()).toISOString(),
size: obj.Size || 0
}
}
)
files.push(...objects)
}
continuationToken = response.NextContinuationToken
} while (continuationToken)
return files
} catch (error) {
core.error(`Error listing artifacts from S3: ${error}`)
throw error
}
}
return {
save,
get,
delete: deleteObj,
list
}
}
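
As a usage note, the provider resolves all of its configuration through core.getInput, and @actions/core reads each input from an INPUT_* environment variable (the input name upper-cased, with spaces replaced by underscores). That makes it possible to exercise getS3Provider outside a workflow, for example against a local MinIO or test bucket. The harness below is an illustrative sketch only; the credentials, bucket name, import paths, and the minimal RequestContext stub are placeholders, not values used by this repository.

import { Readable } from 'node:stream'
import { getS3Provider } from './providers/s3' // assumed path, matching the import in the providers module
import { RequestContext } from './server' // assumed path

// core.getInput('s3-bucket') reads process.env['INPUT_S3-BUCKET'], and so on.
process.env['INPUT_S3-ACCESS-KEY-ID'] = 'test-access-key' // placeholder
process.env['INPUT_S3-SECRET-ACCESS-KEY'] = 'test-secret-key' // placeholder
process.env['INPUT_S3-BUCKET'] = 'turbo-cache-test' // placeholder
process.env['INPUT_S3-REGION'] = 'us-east-1' // placeholder

// Minimal stand-in for the server's request context, just enough for ctx.log.info.
const ctx = { log: { info: (msg: string) => console.log(msg) } } as unknown as RequestContext

async function main(): Promise<void> {
  const provider = getS3Provider()
  await provider.save(ctx, 'deadbeef', 'demo', Readable.from(['hello turbo']))
  const hit = await provider.get(ctx, 'deadbeef')
  console.log('cache hit:', hit !== null)
  console.log('objects under the prefix:', await provider.list())
}

main().catch(err => {
  console.error(err)
  process.exit(1)
})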


@@ -1,7 +1,7 @@
import * as core from '@actions/core'
import { RequestContext } from '.'
import { deleteCache, listCache } from '../cache'
import parse from 'parse-duration'
import { getProvider } from '../providers'
export type TListFile = {
path: string
@@ -40,7 +40,9 @@ export async function cleanup(ctx: RequestContext) {
throw new Error('Invalid max-size provided')
}
const files = await listCache()
const provider = getProvider()
const files = await provider.list()
const fileToDelete: TListFile[] = []
if (maxAgeParsed) {
@@ -87,7 +89,7 @@ export async function cleanup(ctx: RequestContext) {
for (const file of fileToDelete) {
try {
await deleteCache()
await provider.delete(file.path)
ctx.log.info(`Deleted ${file.path}`)
} catch (error) {
core.error(`Failed to delete ${file.path}: ${error}`)
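
For reference, the max-age handling above relies on parse-duration, whose default export converts strings such as '1d' or '12h' into milliseconds. A condensed sketch of the age-based selection that feeds provider.delete is shown below; it is illustrative only, and the helper name selectExpired does not exist in this repository.

import parse from 'parse-duration'
import { TListFile } from './server/cleanup' // assumed path

// Illustrative helper, not part of this repository: pick the files older than
// max-age so the caller can hand them to provider.delete(file.path).
export function selectExpired(files: TListFile[], maxAge: string): TListFile[] {
  const maxAgeMs = parse(maxAge) // e.g. '1d' -> 86_400_000 ms
  if (!maxAgeMs) {
    throw new Error('Invalid max-age provided')
  }
  const cutoff = Date.now() - maxAgeMs
  return files.filter(file => new Date(file.createdAt).getTime() < cutoff)
}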