// Import necessary modules
import Replicate from 'replicate';
import { action } from './_generated/server';
import { v } from 'convex/values';

export const runLlama2 = action({
  // Validate the argument passed in from the client
  args: { prompt: v.string() },
  handler: async (_ctx, { prompt }) => {
    // Initialize Replicate with your API token.
    // Assumes REPLICATE_API_TOKEN is set as an environment variable on your Convex deployment.
    const replicate = new Replicate({
      auth: process.env.REPLICATE_API_TOKEN,
    });
    try {
      // Run Llama 2 with Replicate
      const output = await replicate.run(
        'meta/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1',
        {
          input: {
            prompt,
          },
        }
      );
      console.log('Llama 2 output:', output);
      return output;
    } catch (error) {
      console.error('Error running Llama 2:', error);
      throw new Error('Failed to run Llama 2');
    }
  },
});
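
// A minimal usage sketch, assuming this file lives at convex/llama.ts and the
// client uses Convex's React bindings (adjust the api path to match your filename):
//
//   import { useAction } from 'convex/react';
//   import { api } from '../convex/_generated/api';
//
//   function AskLlama() {
//     const runLlama2 = useAction(api.llama.runLlama2);
//     const ask = async () => {
//       const output = await runLlama2({ prompt: 'Tell me a fun fact about space.' });
//       console.log(output);
//     };
//     return <button onClick={ask}>Ask Llama 2</button>;
//   }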