test(s3): Use minio in local + CI
Betree committed Feb 12, 2025
1 parent d5ee42d commit 3a7e785
Showing 10 changed files with 146 additions and 25 deletions.
6 changes: 4 additions & 2 deletions .github/workflows/ci.yml
@@ -15,8 +15,6 @@ env:
   CI: true
   OC_ENV: ci
   NODE_ENV: test
-  AWS_KEY: ${{ secrets.AWS_KEY }}
-  AWS_SECRET: ${{ secrets.AWS_SECRET }}
 
 jobs:
   lint:
@@ -239,6 +237,10 @@ jobs:
         if: steps.api-node-modules.outputs.cache-hit != 'true'
         run: npm ci --prefer-offline --no-audit
 
+      - # We start MinIO here because GitHub Actions `services` doesn't support `command`. See https://stackoverflow.com/questions/64031598/creating-a-minios3-container-inside-a-github-actions-yml-file
+        name: Start MinIO
+        run: npm run minio -- -d
+
       - run: npm run db:restore
       - run: npm run db:migrate

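Note: `npm run minio -- -d` forwards the `-d` flag to the `minio` npm script (defined in `package.json` below), so the MinIO container starts detached and the workflow step returns immediately, assuming `run-docker.sh` passes its arguments through to `docker compose ... up`.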
6 changes: 4 additions & 2 deletions .github/workflows/e2e.yml
@@ -29,8 +29,6 @@ env:
   TERM: xterm
   STRIPE_WEBHOOK_KEY: ${{ secrets.STRIPE_WEBHOOK_KEY }}
   STRIPE_WEBHOOK_SIGNING_SECRET: ${{ secrets.STRIPE_WEBHOOK_SIGNING_SECRET }}
-  AWS_KEY: ${{ secrets.AWS_KEY }}
-  AWS_SECRET: ${{ secrets.AWS_SECRET }}
 
 jobs:
   e2e:
@@ -155,6 +153,10 @@ jobs:
         if: steps.api-build.outputs.cache-hit != 'true'
         run: npm run build
 
+      - # We start MinIO here because GitHub Actions `services` doesn't support `command`. See https://stackoverflow.com/questions/64031598/creating-a-minios3-container-inside-a-github-actions-yml-file
+        name: Start MinIO
+        run: npm run minio -- -d
+
       # Prepare Frontend
 
       - name: Restore node_modules (frontend)
7 changes: 6 additions & 1 deletion config/test.json
@@ -25,7 +25,12 @@
   },
   "aws": {
     "s3": {
-      "bucket": "opencollective-test"
+      "bucket": "opencollective-test",
+      "endpoint": "http://localhost:9000",
+      "key": "minio",
+      "secret": "password",
+      "region": "us-east-1",
+      "forcePathStyle": true
     }
   },
   "taxForms": {
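For context, a minimal sketch of how these settings map onto an AWS SDK v3 client (illustrative only; the actual wiring lives in `server/lib/awsS3.ts`):

```ts
import { S3Client } from '@aws-sdk/client-s3';
import config from 'config';

// `forcePathStyle` makes the SDK address buckets as
// http://localhost:9000/<bucket> rather than http://<bucket>.localhost:9000,
// which is the addressing scheme MinIO expects out of the box.
const s3 = new S3Client({
  endpoint: config.aws.s3.endpoint,
  region: config.aws.s3.region,
  forcePathStyle: config.aws.s3.forcePathStyle,
  credentials: {
    accessKeyId: config.aws.s3.key,
    secretAccessKey: config.aws.s3.secret,
  },
});
```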
16 changes: 16 additions & 0 deletions docker-compose.dev.yml
@@ -29,3 +29,19 @@ services:
       MP_DATABASE: /data/mailpit.db
       MP_SMTP_AUTH_ACCEPT_ANY: 1
       MP_SMTP_AUTH_ALLOW_INSECURE: 1
+
+  minio:
+    image: minio/minio:latest
+    profiles: [full, dev, uploads]
+    ports:
+      - 9000:9000
+      - 9001:9001
+    environment:
+      MINIO_ROOT_USER: minio
+      MINIO_ROOT_PASSWORD: password
+    volumes:
+      - minio:/data
+    command: server /data --console-address :9001
+
+volumes:
+  minio:
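With this service in place, `npm run minio` (the `uploads`-profile script added to `package.json` below) exposes the S3 API on port 9000 and the MinIO web console on port 9001; the `minio` / `password` root credentials match the `key` / `secret` pair configured in `config/test.json` above.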
20 changes: 6 additions & 14 deletions docs/s3.md
@@ -1,25 +1,17 @@
 # S3
 
 In the `development` environment, it's possible to use a local
-(e.g. [MinIO](/~https://github.com/minio/minio), [LocalStack](/~https://github.com/localstack/localstack)) replacement for S3.
+[MinIO](/~https://github.com/minio/minio) replacement for S3.
 
-For example, running a MinIO container:
+You can achieve this by running the following command:
 
 ```sh
-# create a volume for persistence (optional)
-docker volume create --name minio
-
-docker run -d --name minio \
-  -e MINIO_ROOT_USER=user \
-  -e MINIO_ROOT_PASSWORD=password \
-  -v minio:/data \
-  -p 9000:9000 \
-  -p 9001:9001 \
-  minio/minio server /data --console-address :9001
-
+npm run minio
 ```
 
-Login to the MinIO console via http://localhost:9000 and create a bucket named `opencollective-dvl` with public access policy. If you intend to work on tax forms, create another bucket named `opencollective-dev-us-tax-forms`.
+The first time you run it, you'll need to create the buckets by running the following script:
+
+> NODE_ENV=test npm run script scripts/dev/init-local-s3.ts
 
 The AWS CLI can be used by setting the endpoint to your localhost:
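(The rest of this file is collapsed in the diff; a typical invocation, given here as an assumed illustration rather than the file's own snippet, is `aws s3 ls --endpoint-url http://localhost:9000`.)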
1 change: 1 addition & 0 deletions package.json
@@ -250,6 +250,7 @@
     "prettier:write": "npm run prettier -- --write",
     "script": "babel-node --extensions .js,.ts $1",
     "search": "./scripts/dev/run-docker.sh compose -f docker-compose.dev.yml --profile search up",
+    "minio": "./scripts/dev/run-docker.sh compose -f docker-compose.dev.yml --profile uploads up",
     "sequelize": "babel-node --extensions .js,.ts -- ./node_modules/.bin/sequelize --config config/sequelize-cli.js",
     "start": "node ./dist/index.js",
     "start:e2e": "TZ=UTC NODE_ENV=production OC_ENV=e2e E2E_TEST=1 npm-run-all db:restore:e2e db:migrate build start:e2e:server",
26 changes: 26 additions & 0 deletions scripts/dev/init-local-s3.ts
@@ -0,0 +1,26 @@
+/**
+ * A script that connects to the local S3 server and creates the necessary buckets.
+ */
+
+import '../../server/env';
+
+import config from 'config';
+
+import s3, { dangerouslyInitNonProductionBuckets } from '../../server/lib/awsS3';
+
+if (!s3) {
+  throw new Error('S3 service object not initialized');
+}
+
+const main = async () => {
+  if (config.env === 'production') {
+    throw new Error('This script is not available in the production environment');
+  }
+
+  await dangerouslyInitNonProductionBuckets();
+};
+
+main().catch(err => {
+  console.error(err);
+  process.exit(1);
+});
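Per the updated `docs/s3.md` above, this script is run as `NODE_ENV=test npm run script scripts/dev/init-local-s3.ts`; the `../../server/env` import presumably loads the environment first so that `NODE_ENV=test` resolves the MinIO endpoint and credentials from `config/test.json` (an assumption based on the import path, not verified here).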
9 changes: 8 additions & 1 deletion server/controllers/images.ts
@@ -1,6 +1,9 @@
+import config from 'config';
+
 import { SUPPORTED_FILE_KINDS } from '../constants/file-kind';
 import s3 from '../lib/awsS3';
 import errors from '../lib/errors';
+import logger from '../lib/logger';
 import UploadedFile, { SUPPORTED_FILE_TYPES } from '../models/UploadedFile';
 import { MulterFile } from '../types/Multer';
@@ -56,7 +59,11 @@ export default async function uploadImage(req, res, next) {
   }
 
   if (!s3) {
-    return next(new errors.ServerError('S3 service object not initialized'));
+    if (config.env !== 'production') {
+      logger.error('S3 service object not initialized. In dev or test environments, use [minio](/docs/s3.md).');
+    }
+
+    return next(new errors.ServerError('File uploads are currently disabled'));
   }
 
   // Rate limit
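The net effect of this change: the precise failure ("S3 service object not initialized") now goes to the server logs, with a pointer to the MinIO docs only outside production, while API clients receive a generic "File uploads are currently disabled" error that doesn't leak infrastructure details.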
70 changes: 70 additions & 0 deletions server/lib/awsS3.ts
@@ -4,14 +4,18 @@ import path from 'path';
 import {
   CopyObjectCommand,
   CopyObjectRequest,
+  CreateBucketCommand,
+  DeleteBucketCommand,
   DeleteObjectCommand,
   DeleteObjectOutput,
   GetObjectCommand,
+  HeadBucketCommand,
   HeadObjectCommand,
   HeadObjectOutput,
   ListObjectsV2Command,
   ListObjectsV2Output,
   ObjectCannedACL,
+  PutBucketPolicyCommand,
   PutObjectCommand,
   PutObjectCommandOutput,
   S3Client,
@@ -295,6 +299,72 @@ export const permanentlyDeleteFileFromS3 = async (bucket: string, key: string):
   }
 };
 
+export const checkBucketExists = async (bucket: string): Promise<boolean> => {
+  if (!s3) {
+    throw new Error('S3 is not set');
+  }
+  try {
+    await s3.send(new HeadBucketCommand({ Bucket: bucket }));
+    return true;
+  } catch {
+    return false;
+  }
+};
+
+export const createBucket = async (bucket: string): Promise<void> => {
+  if (!s3) {
+    throw new Error('S3 is not set');
+  }
+  try {
+    await s3.send(new CreateBucketCommand({ Bucket: bucket }));
+  } catch (e) {
+    logger.error(`Error creating bucket ${bucket}:`, e);
+    throw e;
+  }
+};
+
+/**
+ * This function initializes the S3 buckets for non-production environments.
+ */
+export const dangerouslyInitNonProductionBuckets = async ({
+  dropExisting = false,
+}: { dropExisting?: boolean } = {}) => {
+  const buckets = [config.aws.s3.bucket, config.taxForms.aws.s3.bucket];
+
+  for (const bucket of buckets) {
+    const bucketExists = await checkBucketExists(bucket);
+    if (dropExisting && bucketExists) {
+      logger.info(`Bucket ${bucket} already exists, dropping...`);
+      await s3.send(new DeleteBucketCommand({ Bucket: bucket }));
+    } else if (bucketExists) {
+      logger.info(`Bucket ${bucket} already exists`);
+      continue;
+    }
+
+    logger.info(`Creating bucket ${bucket}...`);
+    await createBucket(bucket);
+
+    // TODO: We currently create test buckets with public read/write access. We should make sure they use the same policy as the production buckets.
+    await s3.send(
+      new PutBucketPolicyCommand({
+        Bucket: bucket,
+        Policy: JSON.stringify({
+          Version: '2012-10-17',
+          Statement: [
+            {
+              Sid: 'PublicReadGetObject',
+              Effect: 'Allow',
+              Principal: '*',
+              Action: ['s3:GetObject', 's3:ListBucket'],
+              Resource: [`arn:aws:s3:::${bucket}`, `arn:aws:s3:::${bucket}/*`],
+            },
+          ],
+        }),
+      }),
+    );
+  }
+};
+
 export const checkS3Configured = (): boolean => Boolean(s3);
 
 export default s3;
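A quick sketch of how the new helper might be called from a one-off script under `scripts/dev` (`dropExisting` is the only option; note that S3 and MinIO reject `DeleteBucket` on non-empty buckets, so dropping is mostly useful on a fresh volume):

```ts
import { dangerouslyInitNonProductionBuckets } from '../../server/lib/awsS3';

// Drop and recreate the configured non-production buckets, then apply
// the public-read bucket policy defined above.
await dangerouslyInitNonProductionBuckets({ dropExisting: true });
```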
10 changes: 5 additions & 5 deletions test/server/routes/images.test.js
@@ -9,6 +9,7 @@ import fetch from 'node-fetch';
 import sharp from 'sharp';
 import request from 'supertest';
 
+import { dangerouslyInitNonProductionBuckets } from '../../../server/lib/awsS3';
 import { fakeUser } from '../../test-helpers/fake-data';
 import { startTestServer, stopTestServer } from '../../test-helpers/server';
 import * as utils from '../../utils';
@@ -18,12 +19,11 @@ const application = utils.data('application');
 describe('server/routes/images', () => {
   let user, expressApp;
 
-  before(async function () {
-    if (!config.aws.s3.key) {
-      console.warn('Skipping images tests because AWS credentials are not set');
-      this.skip();
-    }
+  before(async () => {
+    // Initialize the buckets
+    await dangerouslyInitNonProductionBuckets();
 
+    // Start server
     expressApp = await startTestServer();
     await utils.resetTestDB();
     user = await fakeUser();
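Design note: because MinIO now runs locally and in CI, the suite no longer guards on `config.aws.s3.key`; the images tests run unconditionally and provision their buckets up front instead of being skipped when AWS credentials are missing.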
