
Commit 3bac3d8

russellwheatley, dlarocque, hsubox76, tanzimfh, and jsoref authored and committed
feat(firebase-ai): Imagen model support
- includes docs update with example usage
- includes unit tests and a local manual test in e2e app

----

Co-authored-by: Daniel La Rocque <[email protected]>
Co-authored-by: hsubox76 <[email protected]>
Co-authored-by: Tanzim Hossain <[email protected]>
Co-authored-by: Josh Soref <[email protected]>
Co-authored-by: DellaBitta <[email protected]>
1 parent a1a50e6 commit 3bac3d8

21 files changed: +1346 -52 lines

.spellcheck.dict.txt

Lines changed: 1 addition & 0 deletions
```diff
@@ -91,6 +91,7 @@ Homebrew
 HTTP
 HTTPS
 IDFA
+Imagen
 installable
 integrations
 Intellisense
```

docs/ai/usage/index.md

Lines changed: 34 additions & 0 deletions
````diff
@@ -394,6 +394,40 @@ function App() {
 }
 ```
 
+## Generating images from text
+
+You can ask an Imagen model to generate a single image or multiple images by prompting with text:
+
+```js
+<Button
+  title="generate image using Imagen model"
+  onPress={async () => {
+    const app = getApp();
+    const ai = getAI(app);
+
+    const model = getImagenModel(ai, {
+      model: 'imagen-3.0-generate-002',
+      // You can also configure the model to generate multiple images for each request.
+      // See: https://firebase.google.com/docs/ai-logic/model-parameters
+      // generationConfig: {
+      //   numberOfImages: 4
+      // }
+    });
+
+    const prompt = 'Generate an image of London bridge with sharks in the water at sunset';
+
+    const result = await model.generateImages(prompt);
+    // returns a base64-encoded image you can render
+    const image = result.images[0].bytesBase64Encoded;
+
+    // If you wish to have an image generated and uploaded to your Firebase Storage bucket,
+    // you can do the following instead:
+    const gcsURI = 'gs://[PROJECT NAME].appspot.com/[DIRECTORY TO STORE IMAGE]';
+    const gcsResult = await model.generateImagesGCS(prompt, gcsURI);
+  }}
+/>
+```
+
 ## Getting ready for production
 
 For mobile and web apps, you need to protect the Gemini API and your project resources (like tuned models) from abuse by unauthorized clients. You can use Firebase App Check to verify that all API calls are from your actual app. See [Firebase docs for further information](https://firebase.google.com/docs/ai-logic/app-check).
````
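
The added docs snippet stops at obtaining the base64 string. As a rough editorial sketch (not part of this commit), that payload can be displayed with React Native's `Image` component through a `data:` URI; the `image/png` MIME type and the fixed dimensions below are assumptions you would adjust to match the format you requested.

```tsx
// Editorial sketch, not part of this diff. Assumes `image` is the
// bytesBase64Encoded string from result.images[0] and that the bytes are a PNG.
import React from 'react';
import { Image } from 'react-native';

export function GeneratedImage({ image }: { image: string }) {
  return (
    <Image
      // A data: URI lets <Image> render the base64 bytes without writing a temporary file.
      source={{ uri: `data:image/png;base64,${image}` }}
      style={{ width: 256, height: 256 }}
    />
  );
}
```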
Lines changed: 138 additions & 0 deletions
```ts
/**
 * @license
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { describe, expect, it } from '@jest/globals';
import { type ReactNativeFirebase } from '@react-native-firebase/app';
import { AI, AIErrorCode } from '../lib/public-types';
import { AIModel } from '../lib/models/ai-model';
import { AIError } from '../lib/errors';
import { VertexAIBackend } from '../lib/backend';

/**
 * A class that extends AIModel that allows us to test the protected constructor.
 */
class TestModel extends AIModel {
  constructor(ai: AI, modelName: string) {
    super(ai, modelName);
  }
}

const fakeAI: AI = {
  app: {
    name: 'DEFAULT',
    automaticDataCollectionEnabled: true,
    options: {
      apiKey: 'key',
      projectId: 'my-project',
      appId: 'my-appid',
    },
  } as ReactNativeFirebase.FirebaseApp,
  backend: new VertexAIBackend('us-central1'),
  location: 'us-central1',
};

describe('AIModel', () => {
  it('handles plain model name', () => {
    const testModel = new TestModel(fakeAI, 'my-model');

    expect(testModel.model).toBe('publishers/google/models/my-model');
  });

  it('handles models/ prefixed model name', () => {
    const testModel = new TestModel(fakeAI, 'models/my-model');

    expect(testModel.model).toBe('publishers/google/models/my-model');
  });

  it('handles full model name', () => {
    const testModel = new TestModel(fakeAI, 'publishers/google/models/my-model');

    expect(testModel.model).toBe('publishers/google/models/my-model');
  });

  it('handles prefixed tuned model name', () => {
    const testModel = new TestModel(fakeAI, 'tunedModels/my-model');

    expect(testModel.model).toBe('tunedModels/my-model');
  });

  it('throws if not passed an api key', () => {
    const fakeAI: AI = {
      app: {
        name: 'DEFAULT',
        automaticDataCollectionEnabled: true,
        options: {
          projectId: 'my-project',
        },
      } as ReactNativeFirebase.FirebaseApp,
      backend: new VertexAIBackend('us-central1'),
      location: 'us-central1',
    };
    expect(() => {
      new TestModel(fakeAI, 'my-model');
    }).toThrow();
    try {
      new TestModel(fakeAI, 'my-model');
    } catch (e) {
      expect((e as AIError).code).toBe(AIErrorCode.NO_API_KEY);
    }
  });

  it('throws if not passed a project ID', () => {
    const fakeAI: AI = {
      app: {
        name: 'DEFAULT',
        automaticDataCollectionEnabled: true,
        options: {
          apiKey: 'key',
        },
      } as ReactNativeFirebase.FirebaseApp,
      backend: new VertexAIBackend('us-central1'),
      location: 'us-central1',
    };
    expect(() => {
      new TestModel(fakeAI, 'my-model');
    }).toThrow();
    try {
      new TestModel(fakeAI, 'my-model');
    } catch (e) {
      expect((e as AIError).code).toBe(AIErrorCode.NO_PROJECT_ID);
    }
  });

  it('throws if not passed an app ID', () => {
    const fakeAI: AI = {
      app: {
        name: 'DEFAULT',
        automaticDataCollectionEnabled: true,
        options: {
          apiKey: 'key',
          projectId: 'my-project',
        },
      } as ReactNativeFirebase.FirebaseApp,
      backend: new VertexAIBackend('us-central1'),
      location: 'us-central1',
    };
    expect(() => {
      new TestModel(fakeAI, 'my-model');
    }).toThrow();
    try {
      new TestModel(fakeAI, 'my-model');
    } catch (e) {
      expect((e as AIError).code).toBe(AIErrorCode.NO_APP_ID);
    }
  });
});
```
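
Taken together, the four name-handling cases above pin down one normalization rule. The helper below is an editorial sketch of that rule for readers; the real logic lives inside `AIModel`'s constructor and may be structured differently.

```ts
// Editorial sketch, not part of this diff: the model-name normalization the tests above imply.
function normalizeModelName(modelName: string): string {
  if (!modelName.includes('/')) {
    // 'my-model' -> 'publishers/google/models/my-model'
    return `publishers/google/models/${modelName}`;
  }
  if (modelName.startsWith('models/')) {
    // 'models/my-model' -> 'publishers/google/models/my-model'
    return `publishers/google/${modelName}`;
  }
  // Fully qualified names ('publishers/...') and tuned models ('tunedModels/...') pass through unchanged.
  return modelName;
}
```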

packages/ai/__tests__/count-tokens.test.ts

Lines changed: 2 additions & 2 deletions
```diff
@@ -21,7 +21,7 @@ import { countTokens } from '../lib/methods/count-tokens';
 import { CountTokensRequest, RequestOptions } from '../lib/types';
 import { ApiSettings } from '../lib/types/internal';
 import { Task } from '../lib/requests/request';
-import { GoogleAIBackend, VertexAIBackend } from '../lib/backend';
+import { GoogleAIBackend } from '../lib/backend';
 import { SpiedFunction } from 'jest-mock';
 import { mapCountTokensRequest } from '../lib/googleai-mappers';

@@ -30,7 +30,7 @@ const fakeApiSettings: ApiSettings = {
   project: 'my-project',
   location: 'us-central1',
   appId: '',
-  backend: new VertexAIBackend(),
+  backend: new GoogleAIBackend(),
 };

 const fakeGoogleAIApiSettings: ApiSettings = {
```
Lines changed: 162 additions & 0 deletions
```ts
/**
 * @license
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { describe, expect, it, jest, afterEach } from '@jest/globals';
import { type ReactNativeFirebase } from '@react-native-firebase/app';
import { ImagenModel } from '../lib/models/imagen-model';
import {
  ImagenAspectRatio,
  ImagenPersonFilterLevel,
  ImagenSafetyFilterLevel,
  AI,
  AIErrorCode,
} from '../lib/public-types';
import * as request from '../lib/requests/request';
import { AIError } from '../lib/errors';
import { BackendName, getMockResponse } from './test-utils/mock-response';
import { VertexAIBackend } from '../lib/backend';

const fakeAI: AI = {
  app: {
    name: 'DEFAULT',
    automaticDataCollectionEnabled: true,
    options: {
      apiKey: 'key',
      projectId: 'my-project',
      appId: 'my-appid',
    },
  } as ReactNativeFirebase.FirebaseApp,
  backend: new VertexAIBackend('us-central1'),
  location: 'us-central1',
};

describe('ImagenModel', () => {
  afterEach(() => {
    jest.restoreAllMocks();
  });

  it('generateImages makes a request to predict with default parameters', async () => {
    const mockResponse = getMockResponse(
      BackendName.VertexAI,
      'unary-success-generate-images-base64.json',
    );
    const makeRequestStub = jest
      .spyOn(request, 'makeRequest')
      .mockResolvedValue(mockResponse as Response);

    const imagenModel = new ImagenModel(fakeAI, {
      model: 'my-model',
    });
    const prompt = 'A photorealistic image of a toy boat at sea.';
    await imagenModel.generateImages(prompt);
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringMatching(new RegExp(`"prompt":"${prompt}"`)),
      undefined,
    );
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringContaining(`"sampleCount":1`),
      undefined,
    );
  });

  it('generateImages makes a request to predict with generation config and safety settings', async () => {
    const imagenModel = new ImagenModel(fakeAI, {
      model: 'my-model',
      generationConfig: {
        negativePrompt: 'do not hallucinate',
        numberOfImages: 4,
        aspectRatio: ImagenAspectRatio.LANDSCAPE_16x9,
        imageFormat: { mimeType: 'image/jpeg', compressionQuality: 75 },
        addWatermark: true,
      },
      safetySettings: {
        safetyFilterLevel: ImagenSafetyFilterLevel.BLOCK_ONLY_HIGH,
        personFilterLevel: ImagenPersonFilterLevel.ALLOW_ADULT,
      },
    });

    const mockResponse = getMockResponse(
      BackendName.VertexAI,
      'unary-success-generate-images-base64.json',
    );
    const makeRequestStub = jest
      .spyOn(request, 'makeRequest')
      .mockResolvedValue(mockResponse as Response);
    const prompt = 'A photorealistic image of a toy boat at sea.';
    await imagenModel.generateImages(prompt);
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringContaining(`"negativePrompt":"${imagenModel.generationConfig?.negativePrompt}"`),
      undefined,
    );
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringContaining(`"sampleCount":${imagenModel.generationConfig?.numberOfImages}`),
      undefined,
    );
    expect(makeRequestStub).toHaveBeenCalledWith(
      'publishers/google/models/my-model',
      request.Task.PREDICT,
      expect.anything(),
      false,
      expect.stringContaining(`"aspectRatio":"${imagenModel.generationConfig?.aspectRatio}"`),
      undefined,
    );
  });

  it('throws if prompt blocked', async () => {
    const mockResponse = getMockResponse(
      BackendName.VertexAI,
      'unary-failure-generate-images-prompt-blocked.json',
    );

    jest.spyOn(globalThis, 'fetch').mockResolvedValue({
      ok: false,
      status: 400,
      statusText: 'Bad Request',
      json: mockResponse.json,
    } as Response);

    const imagenModel = new ImagenModel(fakeAI, {
      model: 'my-model',
    });
    await expect(imagenModel.generateImages('some inappropriate prompt.')).rejects.toThrow();

    try {
      await imagenModel.generateImages('some inappropriate prompt.');
    } catch (e) {
      expect((e as AIError).code).toBe(AIErrorCode.FETCH_ERROR);
      expect((e as AIError).message).toContain('400');
      expect((e as AIError).message).toContain(
        "Image generation failed with the following error: The prompt could not be submitted. This prompt contains sensitive words that violate Google's Responsible AI practices. Try rephrasing the prompt. If you think this was an error, send feedback.",
      );
    }
  });
});
```
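
The blocked-prompt test above fixes the error surface: an `AIError` with code `FETCH_ERROR` whose message embeds the server's explanation. As an editorial sketch only, app code could handle that case roughly as follows; the import path and the assumption that the package publicly re-exports `AIError`, `AIErrorCode`, and `ImagenModel` (as the Firebase JS SDK does) are not confirmed by this diff.

```ts
// Editorial sketch, not part of this diff. The public exports below are assumptions.
import { AIError, AIErrorCode, ImagenModel } from '@react-native-firebase/ai';

async function tryGenerateImage(model: ImagenModel, prompt: string): Promise<string | null> {
  try {
    const result = await model.generateImages(prompt);
    return result.images[0]?.bytesBase64Encoded ?? null;
  } catch (e) {
    if (e instanceof AIError && e.code === AIErrorCode.FETCH_ERROR) {
      // Blocked or otherwise rejected prompts land here; surface e.message to the user.
      console.warn('Image generation failed:', e.message);
      return null;
    }
    throw e;
  }
}
```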
