1 change: 1 addition & 0 deletions .spellcheck.dict.txt
@@ -91,6 +91,7 @@ Homebrew
HTTP
HTTPS
IDFA
Imagen
installable
integrations
Intellisense
34 changes: 34 additions & 0 deletions docs/ai/usage/index.md
@@ -394,6 +394,40 @@ function App() {
}
```

## Generating images from text

You can ask an Imagen model to generate a single image, or multiple images, from a text prompt:

```js
<Button
  title="Generate image using Imagen model"
  onPress={async () => {
    const app = getApp();
    const ai = getAI(app);

    const model = getImagenModel(ai, {
      model: 'imagen-3.0-generate-002',
      // You can also configure the model to generate multiple images per request.
      // See: https://firebase.google.com/docs/ai-logic/model-parameters
      // generationConfig: {
      //   numberOfImages: 4
      // }
    });

    const prompt = 'Generate an image of London bridge with sharks in the water at sunset';

    const result = await model.generateImages(prompt);
    // The response contains a base64-encoded image you can render.
    const image = result.images[0].bytesBase64Encoded;

    // If you would rather have the generated image uploaded to your
    // Firebase Storage bucket, use generateImagesGCS instead:
    const gcsURI = 'gs://[PROJECT NAME].appspot.com/[DIRECTORY TO STORE IMAGE]';
    const gcsResult = await model.generateImagesGCS(prompt, gcsURI);
  }}
/>
```
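
As a rough illustration of how the base64 string can be displayed, the sketch below uses React Native's built-in `Image` component with a data URI. The `GeneratedImage` component name is made up for this example, and the `image/png` MIME type is an assumption; adjust it if you request a different `imageFormat` in the generation config.

```js
import React from 'react';
import { Image } from 'react-native';

// Renders a base64-encoded image returned by generateImages().
// `base64` is expected to be the bytesBase64Encoded string from the response.
// The MIME type below assumes PNG output; change it if your generationConfig
// requests a different imageFormat.
export function GeneratedImage({ base64 }) {
  return (
    <Image
      style={{ width: 256, height: 256 }}
      source={{ uri: `data:image/png;base64,${base64}` }}
    />
  );
}
```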

## Getting ready for production

For mobile and web apps, you need to protect the Gemini API and your project resources (like tuned models) from abuse by unauthorized clients. You can use Firebase App Check to verify that all API calls are from your actual app. See [Firebase docs for further information](https://firebase.google.com/docs/ai-logic/app-check).
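
Below is a minimal sketch of what enabling App Check can look like at app startup, before any AI calls are made. It assumes the `@react-native-firebase/app-check` package and its `newReactNativeFirebaseAppCheckProvider()` configuration API; the provider choices shown are illustrative, so verify the exact setup for your platforms against the App Check documentation.

```js
import { firebase } from '@react-native-firebase/app-check';

// Run once at startup, before calling getAI()/getImagenModel().
async function setUpAppCheck() {
  // Configure platform-appropriate attestation providers
  // (the provider choices here are assumptions for this sketch).
  const provider = firebase.appCheck().newReactNativeFirebaseAppCheckProvider();
  provider.configure({
    android: { provider: 'playIntegrity' },
    apple: { provider: 'appAttest' },
  });

  await firebase.appCheck().initializeAppCheck({
    provider,
    isTokenAutoRefreshEnabled: true,
  });
}
```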
138 changes: 138 additions & 0 deletions packages/ai/__tests__/ai-model.test.ts
@@ -0,0 +1,138 @@
/**
 * @license
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { describe, expect, it } from '@jest/globals';
import { type ReactNativeFirebase } from '@react-native-firebase/app';
import { AI, AIErrorCode } from '../lib/public-types';
import { AIModel } from '../lib/models/ai-model';
import { AIError } from '../lib/errors';
import { VertexAIBackend } from '../lib/backend';

/**
 * A class that extends AIModel, allowing us to test its protected constructor.
 */
class TestModel extends AIModel {
  constructor(ai: AI, modelName: string) {
    super(ai, modelName);
  }
}

const fakeAI: AI = {
  app: {
    name: 'DEFAULT',
    automaticDataCollectionEnabled: true,
    options: {
      apiKey: 'key',
      projectId: 'my-project',
      appId: 'my-appid',
    },
  } as ReactNativeFirebase.FirebaseApp,
  backend: new VertexAIBackend('us-central1'),
  location: 'us-central1',
};

describe('AIModel', () => {
  it('handles plain model name', () => {
    const testModel = new TestModel(fakeAI, 'my-model');

    expect(testModel.model).toBe('publishers/google/models/my-model');
  });

  it('handles models/ prefixed model name', () => {
    const testModel = new TestModel(fakeAI, 'models/my-model');

    expect(testModel.model).toBe('publishers/google/models/my-model');
  });

  it('handles full model name', () => {
    const testModel = new TestModel(fakeAI, 'publishers/google/models/my-model');

    expect(testModel.model).toBe('publishers/google/models/my-model');
  });

  it('handles prefixed tuned model name', () => {
    const testModel = new TestModel(fakeAI, 'tunedModels/my-model');

    expect(testModel.model).toBe('tunedModels/my-model');
  });

  it('throws if not passed an api key', () => {
    const fakeAI: AI = {
      app: {
        name: 'DEFAULT',
        automaticDataCollectionEnabled: true,
        options: {
          projectId: 'my-project',
        },
      } as ReactNativeFirebase.FirebaseApp,
      backend: new VertexAIBackend('us-central1'),
      location: 'us-central1',
    };
    expect(() => {
      new TestModel(fakeAI, 'my-model');
    }).toThrow();
    try {
      new TestModel(fakeAI, 'my-model');
    } catch (e) {
      expect((e as AIError).code).toBe(AIErrorCode.NO_API_KEY);
    }
  });

  it('throws if not passed a project ID', () => {
    const fakeAI: AI = {
      app: {
        name: 'DEFAULT',
        automaticDataCollectionEnabled: true,
        options: {
          apiKey: 'key',
        },
      } as ReactNativeFirebase.FirebaseApp,
      backend: new VertexAIBackend('us-central1'),
      location: 'us-central1',
    };
    expect(() => {
      new TestModel(fakeAI, 'my-model');
    }).toThrow();
    try {
      new TestModel(fakeAI, 'my-model');
    } catch (e) {
      expect((e as AIError).code).toBe(AIErrorCode.NO_PROJECT_ID);
    }
  });

  it('throws if not passed an app ID', () => {
    const fakeAI: AI = {
      app: {
        name: 'DEFAULT',
        automaticDataCollectionEnabled: true,
        options: {
          apiKey: 'key',
          projectId: 'my-project',
        },
      } as ReactNativeFirebase.FirebaseApp,
      backend: new VertexAIBackend('us-central1'),
      location: 'us-central1',
    };
    expect(() => {
      new TestModel(fakeAI, 'my-model');
    }).toThrow();
    try {
      new TestModel(fakeAI, 'my-model');
    } catch (e) {
      expect((e as AIError).code).toBe(AIErrorCode.NO_APP_ID);
    }
  });
});
4 changes: 2 additions & 2 deletions packages/ai/__tests__/count-tokens.test.ts
@@ -21,7 +21,7 @@ import { countTokens } from '../lib/methods/count-tokens';
import { CountTokensRequest, RequestOptions } from '../lib/types';
import { ApiSettings } from '../lib/types/internal';
import { Task } from '../lib/requests/request';
import { GoogleAIBackend, VertexAIBackend } from '../lib/backend';
import { GoogleAIBackend } from '../lib/backend';
import { SpiedFunction } from 'jest-mock';
import { mapCountTokensRequest } from '../lib/googleai-mappers';

@@ -30,7 +30,7 @@ const fakeApiSettings: ApiSettings = {
  project: 'my-project',
  location: 'us-central1',
  appId: '',
  backend: new VertexAIBackend(),
  backend: new GoogleAIBackend(),
};

const fakeGoogleAIApiSettings: ApiSettings = {
162 changes: 162 additions & 0 deletions packages/ai/__tests__/imagen-model.test.ts
@@ -0,0 +1,162 @@
/**
 * @license
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { describe, expect, it, jest, afterEach } from '@jest/globals';
import { type ReactNativeFirebase } from '@react-native-firebase/app';
import { ImagenModel } from '../lib/models/imagen-model';
import {
  ImagenAspectRatio,
  ImagenPersonFilterLevel,
  ImagenSafetyFilterLevel,
  AI,
  AIErrorCode,
} from '../lib/public-types';
import * as request from '../lib/requests/request';
import { AIError } from '../lib/errors';
import { BackendName, getMockResponse } from './test-utils/mock-response';
import { VertexAIBackend } from '../lib/backend';

const fakeAI: AI = {
  app: {
    name: 'DEFAULT',
    automaticDataCollectionEnabled: true,
    options: {
      apiKey: 'key',
      projectId: 'my-project',
      appId: 'my-appid',
    },
  } as ReactNativeFirebase.FirebaseApp,
  backend: new VertexAIBackend('us-central1'),
  location: 'us-central1',
};

describe('ImagenModel', () => {
  afterEach(() => {
    jest.restoreAllMocks();
  });

  it('generateImages makes a request to predict with default parameters', async () => {
    const mockResponse = getMockResponse(
      BackendName.VertexAI,
      'unary-success-generate-images-base64.json',
    );
    const makeRequestStub = jest
      .spyOn(request, 'makeRequest')
      .mockResolvedValue(mockResponse as Response);

    const imagenModel = new ImagenModel(fakeAI, {
      model: 'my-model',
    });
    const prompt = 'A photorealistic image of a toy boat at sea.';
    await imagenModel.generateImages(prompt);
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringMatching(new RegExp(`"prompt":"${prompt}"`)),
      undefined,
    );
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringContaining(`"sampleCount":1`),
      undefined,
    );
  });

  it('generateImages makes a request to predict with generation config and safety settings', async () => {
    const imagenModel = new ImagenModel(fakeAI, {
      model: 'my-model',
      generationConfig: {
        negativePrompt: 'do not hallucinate',
        numberOfImages: 4,
        aspectRatio: ImagenAspectRatio.LANDSCAPE_16x9,
        imageFormat: { mimeType: 'image/jpeg', compressionQuality: 75 },
        addWatermark: true,
      },
      safetySettings: {
        safetyFilterLevel: ImagenSafetyFilterLevel.BLOCK_ONLY_HIGH,
        personFilterLevel: ImagenPersonFilterLevel.ALLOW_ADULT,
      },
    });

    const mockResponse = getMockResponse(
      BackendName.VertexAI,
      'unary-success-generate-images-base64.json',
    );
    const makeRequestStub = jest
      .spyOn(request, 'makeRequest')
      .mockResolvedValue(mockResponse as Response);
    const prompt = 'A photorealistic image of a toy boat at sea.';
    await imagenModel.generateImages(prompt);
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringContaining(`"negativePrompt":"${imagenModel.generationConfig?.negativePrompt}"`),
      undefined,
    );
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringContaining(`"sampleCount":${imagenModel.generationConfig?.numberOfImages}`),
      undefined,
    );
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringContaining(`"aspectRatio":"${imagenModel.generationConfig?.aspectRatio}"`),
      undefined,
    );
  });

  it('throws if prompt blocked', async () => {
    const mockResponse = getMockResponse(
      BackendName.VertexAI,
      'unary-failure-generate-images-prompt-blocked.json',
    );

    jest.spyOn(globalThis, 'fetch').mockResolvedValue({
      ok: false,
      status: 400,
      statusText: 'Bad Request',
      json: mockResponse.json,
    } as Response);

    const imagenModel = new ImagenModel(fakeAI, {
      model: 'my-model',
    });
    await expect(imagenModel.generateImages('some inappropriate prompt.')).rejects.toThrow();

    try {
      await imagenModel.generateImages('some inappropriate prompt.');
    } catch (e) {
      expect((e as AIError).code).toBe(AIErrorCode.FETCH_ERROR);
      expect((e as AIError).message).toContain('400');
      expect((e as AIError).message).toContain(
        "Image generation failed with the following error: The prompt could not be submitted. This prompt contains sensitive words that violate Google's Responsible AI practices. Try rephrasing the prompt. If you think this was an error, send feedback.",
      );
    }
  });
});