import { Injectable } from '@angular/core';
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { Observable } from 'rxjs';

export interface GroqMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

export interface GroqResponse {
  id: string;
  object: string;
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: string;
      content: string;
    };
    finish_reason: string;
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}
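
// Illustrative shape of the payload GroqResponse models. The values below are
// made up for documentation; the field names follow Groq's OpenAI-compatible
// chat completions API:
//
//   {
//     "id": "chatcmpl-123",
//     "object": "chat.completion",
//     "created": 1700000000,
//     "model": "openai/gpt-oss-120b",
//     "choices": [
//       {
//         "index": 0,
//         "message": { "role": "assistant", "content": "Hello!" },
//         "finish_reason": "stop"
//       }
//     ],
//     "usage": { "prompt_tokens": 9, "completion_tokens": 3, "total_tokens": 12 }
//   }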

@Injectable({
  providedIn: 'root'
})
export class GroqService {
  private readonly API_URL = 'https://api.groq.com/openai/v1/chat/completions';
  // Replace with your API key. Never commit a real key to source control;
  // load it from environment config instead (see the sketch after the class).
  private readonly API_KEY = 'YOUR_GROQ_API_KEY';

  constructor(private http: HttpClient) {}

  /**
   * Sends a message to the Groq chatbot.
   * @param messages Full conversation history
   * @param model Model to use (default: openai/gpt-oss-120b)
   * @returns Observable with Groq's response
   */
  sendMessage(
    messages: GroqMessage[],
    model: string = 'openai/gpt-oss-120b'
  ): Observable<GroqResponse> {
    const headers = new HttpHeaders({
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${this.API_KEY}`
    });

    const body = {
      model: model,
      messages: messages,
      temperature: 0.7,
      max_tokens: 2048,
      top_p: 1,
      stream: false
    };

    return this.http.post<GroqResponse>(this.API_URL, body, { headers });
  }
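
  // Example usage from a component (illustrative; `history` is assumed to be a
  // GroqMessage[] the caller maintains, and `groq` an injected GroqService):
  //
  //   this.groq.sendMessage(history).subscribe({
  //     next: res => console.log(res.choices[0]?.message.content),
  //     error: err => console.error(err)
  //   });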

  /**
   * Sends a message with streaming (progressive display).
   * @param messages Conversation history
   * @param onChunk Callback invoked for each piece of text received
   * @param onComplete Callback invoked when generation finishes
   * @param onError Callback invoked on error
   */
  sendMessageStream(
    messages: GroqMessage[],
    onChunk: (text: string) => void,
    onComplete: () => void,
    onError: (error: any) => void
  ): void {
    const body = {
      model: 'openai/gpt-oss-120b',
      messages: messages,
      temperature: 0.7,
      max_tokens: 2048,
      stream: true
    };

    // fetch is used instead of HttpClient so the response body can be read
    // progressively as a stream.
    fetch(this.API_URL, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${this.API_KEY}`
      },
      body: JSON.stringify(body)
    })
      .then(response => {
        if (!response.ok) {
          throw new Error(`HTTP error! status: ${response.status}`);
        }
        return response.body;
      })
      .then(body => {
        const reader = body!.getReader();
        const decoder = new TextDecoder();
        // A network read can end in the middle of an SSE line, so buffer the
        // trailing partial line and only parse complete ones.
        let buffer = '';

        const processStream = ({ done, value }: ReadableStreamReadResult<Uint8Array>): Promise<void> => {
          if (done) {
            onComplete();
            return Promise.resolve();
          }

          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split('\n');
          buffer = lines.pop() ?? ''; // keep the incomplete last line

          for (const line of lines) {
            const trimmed = line.trim();
            if (trimmed.startsWith('data: ')) {
              const data = trimmed.slice(6);
              if (data === '[DONE]') {
                onComplete();
                return Promise.resolve();
              }

              try {
                const parsed = JSON.parse(data);
                const content = parsed.choices[0]?.delta?.content;
                if (content) {
                  onChunk(content);
                }
              } catch (e) {
                console.error('JSON parsing error:', e);
              }
            }
          }

          return reader.read().then(processStream);
        };

        return reader.read().then(processStream);
      })
      .catch(error => {
        console.error('Error calling the API:', error);
        onError(error);
      });
  }
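
  // Example usage (illustrative): accumulate the streamed deltas into a string
  // bound to the template so text appears as tokens arrive. On the wire this
  // parses Server-Sent Events lines such as
  //   data: {"choices":[{"delta":{"content":"Hi"}}]}
  // terminated by `data: [DONE]`.
  //
  //   let answer = '';
  //   this.groq.sendMessageStream(
  //     history,
  //     chunk => { answer += chunk; },
  //     () => console.log('done:', answer),
  //     err => console.error(err)
  //   );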

  /**
   * Checks whether the API key is configured.
   */
  isConfigured(): boolean {
    return this.API_KEY.length > 0 && this.API_KEY !== 'YOUR_GROQ_API_KEY';
  }
}
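
// A minimal sketch of keeping the key out of the source file, assuming the
// standard Angular environment files; the `groqApiKey` field name is an
// assumption, not an existing part of this service:
//
//   // src/environments/environment.ts
//   export const environment = {
//     production: false,
//     groqApiKey: 'gsk_...' // supplied locally or at build time, never committed
//   };
//
//   // groq.service.ts
//   import { environment } from '../environments/environment';
//   ...
//   private readonly API_KEY = environment.groqApiKey;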