- email verification when the address has not been verified yet (component)

- other PAGERIS UI updates
- OLLAMA AI
Surya Paolo
2025-12-11 18:34:39 +01:00
parent 6fdb101092
commit 89a8d10eae
44 changed files with 7915 additions and 3565 deletions

OllamaChat.vue

@@ -0,0 +1,569 @@
<template>
<q-card
class="ollama-chat"
:style="{ height: height }"
>
<!-- Header -->
<q-card-section class="q-py-sm bg-primary text-white">
<div class="row items-center justify-between">
<div class="row items-center q-gutter-sm">
<q-badge
:color="isConnected ? 'green' : 'red'"
rounded
/>
<span class="text-subtitle1 text-weight-medium"
>{{ title }} - {{ settings.model }}</span
>
</div>
<div class="row q-gutter-xs">
<q-btn
flat
dense
round
icon="settings"
@click="showSettings = true"
>
<q-tooltip>Impostazioni</q-tooltip>
</q-btn>
<q-btn
flat
dense
round
icon="delete_sweep"
@click="clearChat"
>
<q-tooltip>Pulisci Chat</q-tooltip>
</q-btn>
</div>
</div>
</q-card-section>
<!-- Messages -->
<q-card-section
ref="messagesContainer"
class="messages-container q-pa-md"
>
<div
v-if="messages.length === 0"
class="text-center text-grey-6 q-py-xl"
>
<q-icon
name="chat"
size="64px"
color="grey-4"
/>
<p class="q-mt-md">Inizia una conversazione!</p>
</div>
<div
v-for="(msg, index) in messages"
:key="index"
class="message-wrapper q-mb-md"
:class="msg.role === 'user' ? 'text-right' : 'text-left'"
>
<div
class="message-bubble q-pa-sm q-px-md"
:class="msg.role === 'user' ? 'bg-primary text-white' : 'bg-grey-3 text-dark'"
>
<!-- Typing dots only while streaming AND content is still empty -->
<div
v-if="msg.role === 'assistant' && msg.isStreaming && !msg.content"
class="typing-indicator"
>
<span></span><span></span><span></span>
</div>
<!-- Message content (also during streaming, once chunks have arrived) -->
<div
v-else
class="message-content"
>
<div v-html="formatMessage(msg.content)"></div>
</div>
</div>
<div class="text-caption text-grey-6 q-mt-xs">
{{ msg.role === 'user' ? 'Tu' : modelName }} {{ formatTime(msg.timestamp) }}
</div>
</div>
</q-card-section>
<!-- Input -->
<q-card-section class="q-pa-sm border-top">
<div class="row q-gutter-sm items-end">
<q-input
v-model="inputMessage"
:disable="isGenerating"
outlined
dense
autogrow
class="col"
placeholder="Scrivi un messaggio..."
@keydown.enter="handleEnter"
>
<template v-slot:append>
<q-btn
v-if="isGenerating"
flat
dense
round
icon="stop"
color="negative"
@click="stopGeneration"
>
<q-tooltip>Ferma generazione</q-tooltip>
</q-btn>
</template>
</q-input>
<q-btn
:loading="isGenerating"
:disable="!inputMessage.trim() || isGenerating"
color="primary"
icon="send"
@click="sendMessage"
>
<q-tooltip>Invia messaggio</q-tooltip>
</q-btn>
</div>
</q-card-section>
<!-- Settings Dialog -->
<q-dialog v-model="showSettings">
<q-card style="min-width: 350px">
<q-card-section class="row items-center q-pb-none">
<div class="text-h6">Impostazioni</div>
<q-space />
<q-btn
icon="close"
flat
round
dense
v-close-popup
/>
</q-card-section>
<q-card-section class="q-gutter-md">
<q-input
v-model="settings.baseUrl"
outlined
label="Ollama URL"
hint="URL del server Ollama"
/>
<q-select
v-model="settings.model"
:options="availableModels"
outlined
label="Modello"
emit-value
map-options
/>
<div>
<div class="text-caption q-mb-sm">
Temperatura: {{ settings.temperature }}
</div>
<q-slider
v-model="settings.temperature"
:min="0"
:max="2"
:step="0.1"
label
color="primary"
/>
</div>
<q-input
v-model="settings.systemPrompt"
outlined
type="textarea"
label="System Prompt (opzionale)"
hint="Istruzioni per il comportamento dell'AI"
/>
</q-card-section>
<q-card-actions align="right">
<q-btn
flat
label="Annulla"
color="grey"
v-close-popup
/>
<q-btn
flat
label="Salva"
color="primary"
@click="saveSettings"
v-close-popup
/>
</q-card-actions>
</q-card>
</q-dialog>
</q-card>
</template>
<script>
import { ref, reactive, computed, onMounted, nextTick, watch } from 'vue';
import OllamaService from './OllamaService.js';
import { tools } from '@tools';
export default {
name: 'OllamaChat',
props: {
title: {
type: String,
default: 'Chat AI',
},
height: {
type: String,
default: '600px',
},
baseUrl: {
type: String,
default: 'http://localhost:11434',
},
model: {
type: String,
default: '',
},
temperature: {
type: Number,
default: 0.7,
},
systemPrompt: {
type: String,
default: '',
},
initialMessages: {
type: Array,
default: () => [],
},
},
emits: ['message-sent', 'response-received', 'error', 'settings-changed'],
setup(props, { emit }) {
// State
const messages = ref([...props.initialMessages]);
const inputMessage = ref('');
const isGenerating = ref(false);
const isConnected = ref(false);
const showSettings = ref(false);
const messagesContainer = ref(null);
const abortController = ref(null);
const availableModels = ref([]);
// Settings
const settings = reactive({
baseUrl: props.baseUrl,
model: props.model,
temperature: props.temperature,
systemPrompt: props.systemPrompt,
});
// Ollama service instance
const ollama = new OllamaService(settings.baseUrl);
// Computed
const modelName = computed(() => settings.model);
// Methods
const scrollToBottom = async () => {
await nextTick();
if (messagesContainer.value) {
const container = messagesContainer.value.$el || messagesContainer.value;
container.scrollTop = container.scrollHeight;
}
};
// Escape HTML first: the result is rendered with v-html, so raw
// model output must never reach the DOM unescaped.
const escapeHtml = (str) =>
str.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
const formatMessage = (content) => {
if (!content) return '';
return escapeHtml(content)
.replace(
/```(\w+)?\n([\s\S]*?)```/g,
'<pre class="code-block"><code>$2</code></pre>'
)
.replace(/`([^`]+)`/g, '<code class="inline-code">$1</code>')
.replace(/\*\*([^*]+)\*\*/g, '<strong>$1</strong>')
.replace(/\*([^*]+)\*/g, '<em>$1</em>')
.replace(/\n/g, '<br>');
};
const formatTime = (timestamp) => {
if (!timestamp) return '';
return new Date(timestamp).toLocaleTimeString('it-IT', {
hour: '2-digit',
minute: '2-digit',
});
};
const handleEnter = (event) => {
// Enter sends; Shift+Enter keeps the default newline in the autogrow input.
if (!event.shiftKey) {
event.preventDefault();
sendMessage();
}
};
const sendMessage = async () => {
const content = inputMessage.value.trim();
if (!content || isGenerating.value) return;
// Add user message
const userMessage = {
role: 'user',
content,
timestamp: new Date(),
};
messages.value.push(userMessage);
inputMessage.value = '';
emit('message-sent', userMessage);
// Add placeholder for assistant
const assistantMessage = {
role: 'assistant',
content: '',
timestamp: new Date(),
isStreaming: true,
};
messages.value.push(assistantMessage);
const assistantIndex = messages.value.length - 1;
isGenerating.value = true;
abortController.value = new AbortController();
try {
ollama.setBaseUrl(settings.baseUrl);
const chatMessages = messages.value
.filter((m) => !m.isStreaming)
.map((m) => ({ role: m.role, content: m.content }));
await ollama.streamChat(
{
model: settings.model,
messages: chatMessages,
temperature: settings.temperature,
system: settings.systemPrompt || undefined,
signal: abortController.value.signal,
},
(chunk, full) => {
// Mutate the array element in place: replacing it with a copy here
// would orphan the object that the completion callback updates,
// leaving isStreaming stuck at true in the rendered list.
const msg = messages.value[assistantIndex];
msg.content = full;
msg.isStreaming = true;
scrollToBottom();
},
(fullContent) => {
const msg = messages.value[assistantIndex];
msg.content = fullContent;
msg.isStreaming = false;
msg.timestamp = new Date();
emit('response-received', msg);
}
);
isConnected.value = true;
} catch (error) {
const msg = messages.value[assistantIndex];
msg.isStreaming = false;
if (error.name === 'AbortError') {
// Stopped by the user: keep whatever partial content arrived.
} else {
msg.content = `❌ Errore: ${error.message}`;
isConnected.value = false;
emit('error', error);
}
}
isGenerating.value = false;
scrollToBottom();
};
const stopGeneration = () => {
if (abortController.value) {
abortController.value.abort();
isGenerating.value = false;
}
};
const clearChat = () => {
messages.value = [];
};
const loadModels = async () => {
try {
ollama.setBaseUrl(settings.baseUrl);
const models = await ollama.listModels();
availableModels.value = models.map((m) => ({
label: m.name,
value: m.name,
}));
isConnected.value = true;
// Restore the model saved in the cookie, if any
const savedModel = tools.getCookie('ollama_model');
if (savedModel) {
settings.model = savedModel;
}
// Fall back to the first available model if the saved one is gone
const modelIndex = availableModels.value.findIndex(
(m) => m.value === settings.model
);
if (modelIndex === -1 && availableModels.value.length > 0) {
settings.model = availableModels.value[0].value;
}
scrollToBottom();
} catch (error) {
console.error('Error loadModels', error);
isConnected.value = false;
availableModels.value = [
{ label: 'llama3.2', value: 'llama3.2' },
{ label: 'llama3.1', value: 'llama3.1' },
{ label: 'mistral', value: 'mistral' },
{ label: 'codellama', value: 'codellama' },
];
}
};
const saveSettings = () => {
emit('settings-changed', { ...settings });
};
// Lifecycle
onMounted(() => {
try {
loadModels();
} catch (e) {
console.error('Error Mounted Chat:', e);
}
});
// Persist the chosen model across sessions (primitive source, no deep watch needed)
watch(
() => settings.model,
(newModel) => {
tools.setCookie('ollama_model', newModel);
}
);
// Watch messages for scroll
watch(messages, scrollToBottom, { deep: true });
return {
messages,
inputMessage,
isGenerating,
isConnected,
showSettings,
messagesContainer,
settings,
availableModels,
modelName,
formatMessage,
formatTime,
handleEnter,
sendMessage,
stopGeneration,
clearChat,
saveSettings,
tools,
};
},
};
</script>
<style scoped>
.ollama-chat {
display: flex;
flex-direction: column;
}
.messages-container {
flex: 1;
overflow-y: auto;
}
.message-bubble {
display: inline-block;
max-width: 80%;
border-radius: 16px;
word-break: break-word;
}
.text-right .message-bubble {
border-bottom-right-radius: 4px;
}
.text-left .message-bubble {
border-bottom-left-radius: 4px;
}
.message-content :deep(pre.code-block) {
background: #1e1e1e;
color: #d4d4d4;
padding: 12px;
border-radius: 8px;
overflow-x: auto;
margin: 8px 0;
}
.message-content :deep(.inline-code) {
background: rgba(0, 0, 0, 0.1);
padding: 2px 6px;
border-radius: 4px;
font-family: monospace;
}
.typing-indicator {
display: flex;
gap: 4px;
padding: 8px 0;
}
.typing-indicator span {
width: 8px;
height: 8px;
background: currentColor;
border-radius: 50%;
animation: typing 1.4s infinite;
}
.typing-indicator span:nth-child(2) {
animation-delay: 0.2s;
}
.typing-indicator span:nth-child(3) {
animation-delay: 0.4s;
}
@keyframes typing {
0%,
60%,
100% {
opacity: 0.3;
transform: scale(0.8);
}
30% {
opacity: 1;
transform: scale(1);
}
}
.border-top {
border-top: 1px solid rgba(0, 0, 0, 0.12);
}
.streaming-cursor {
display: inline-block;
animation: blink-cursor 0.8s infinite;
color: var(--q-primary);
margin-left: 2px;
}
@keyframes blink-cursor {
0%,
50% {
opacity: 1;
}
51%,
100% {
opacity: 0;
}
}
</style>
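Usage sketch (editor's illustration, not part of the commit): mounting the component from a parent page. The props and events come from the component's own definition above; the handler names and prop values are assumptions.

<template>
  <OllamaChat
    title="Assistente"
    height="500px"
    base-url="http://localhost:11434"
    system-prompt="Rispondi in italiano."
    @message-sent="onMessageSent"
    @response-received="onResponse"
    @error="onError"
  />
</template>
<script>
import OllamaChat from './OllamaChat.vue';
export default {
  components: { OllamaChat },
  methods: {
    onMessageSent(msg) { console.log('user:', msg.content); },
    onResponse(msg) { console.log('assistant:', msg.content); },
    onError(err) { console.error('chat error:', err); },
  },
};
</script>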

OllamaService.js

@@ -0,0 +1,388 @@
/**
* OllamaService - Service for talking to the Ollama API
* Use it in Quasar/Vue.js for all AI calls
*/
class OllamaService {
constructor(baseUrl = 'http://localhost:11434') {
this.baseUrl = baseUrl.replace(/\/$/, '');
this.defaultModel = '';
this.defaultTemperature = 0.7;
}
// Set the base URL
setBaseUrl(url) {
this.baseUrl = url.replace(/\/$/, '');
}
// Set the default model
setDefaultModel(model) {
this.defaultModel = model;
}
/**
* Chat with message history
* @param {Object} options - Chat options
* @param {string} options.model - Model name
* @param {Array} options.messages - Message array [{role: 'user'|'assistant', content: '...'}]
* @param {boolean} options.stream - Enable streaming
* @param {number} options.temperature - Temperature (0-2)
* @param {string} options.system - Optional system prompt
* @param {AbortSignal} options.signal - Optional signal to cancel the request
* @returns {Promise<string|Response>}
*/
async chat(options) {
const {
model = this.defaultModel,
messages,
stream = false,
temperature = this.defaultTemperature,
system = null,
maxTokens = null,
topP = null,
topK = null,
signal = null,
} = options;
const payload = {
model,
messages,
stream,
options: {
temperature,
...(maxTokens && { num_predict: maxTokens }),
...(topP && { top_p: topP }),
...(topK && { top_k: topK }),
},
};
if (system) {
payload.system = system;
}
const response = await fetch(`${this.baseUrl}/api2/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(payload),
...(signal && { signal }),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
}
if (stream) return response;
const data = await response.json();
return data.message.content;
}
/**
* Simple text generation (no history)
* @param {Object} options - Generation options
* @param {string} options.model - Model name
* @param {string} options.prompt - Input prompt
* @param {boolean} options.stream - Enable streaming
* @param {number} options.temperature - Temperature (0-2)
* @param {string} options.system - Optional system prompt
* @param {AbortSignal} options.signal - Optional signal to cancel the request
* @returns {Promise<string|Response>}
*/
async generate(options) {
const {
model = this.defaultModel,
prompt,
stream = false,
temperature = this.defaultTemperature,
system = null,
maxTokens = null,
signal = null,
} = options;
const payload = {
model,
prompt,
stream,
options: {
temperature,
...(maxTokens && { num_predict: maxTokens }),
},
};
if (system) {
payload.system = system;
}
const response = await fetch(`${this.baseUrl}/api2/generate`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(payload),
...(signal && { signal }),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
}
if (stream) return response;
const data = await response.json();
return data.response;
}
/**
* Chat with streaming and callbacks
* @param {Object} options - Chat options
* @param {Function} onChunk - Callback for each chunk (chunk, fullContent)
* @param {Function} onComplete - Callback on completion (fullContent)
* @returns {Promise<string>}
*/
async streamChat(options, onChunk, onComplete = null) {
const response = await this.chat({ ...options, stream: true });
const reader = response.body.getReader();
const decoder = new TextDecoder();
let fullContent = '';
let buffer = '';
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
// stream: true keeps multi-byte characters split across reads intact;
// the buffer keeps NDJSON lines split across reads intact.
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop(); // incomplete trailing line, finished by the next read
for (const line of lines) {
if (!line.trim()) continue;
try {
const json = JSON.parse(line);
if (json.message?.content) {
fullContent += json.message.content;
if (onChunk) onChunk(json.message.content, fullContent);
}
} catch (e) {
// Ignore non-JSON lines
}
}
}
} finally {
reader.releaseLock();
}
if (onComplete) onComplete(fullContent);
return fullContent;
}
/**
* Generation with streaming and callbacks
* @param {Object} options - Generation options
* @param {Function} onChunk - Callback for each chunk (chunk, fullContent)
* @param {Function} onComplete - Callback on completion (fullContent)
* @returns {Promise<string>}
*/
async streamGenerate(options, onChunk, onComplete = null) {
const response = await this.generate({ ...options, stream: true });
const reader = response.body.getReader();
const decoder = new TextDecoder();
let fullContent = '';
let buffer = '';
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop(); // incomplete trailing line, finished by the next read
for (const line of lines) {
if (!line.trim()) continue;
try {
const json = JSON.parse(line);
if (json.response) {
fullContent += json.response;
if (onChunk) onChunk(json.response, fullContent);
}
} catch (e) {
// Ignore non-JSON lines
}
}
}
} finally {
reader.releaseLock();
}
if (onComplete) onComplete(fullContent);
return fullContent;
}
/**
* List available models
* @returns {Promise<Array>}
*/
async listModels() {
const response = await fetch(`${this.baseUrl}/api2/models`);
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const data = await response.json();
return data.models || [];
}
/**
* Get information about a model
* @param {string} model - Model name
* @returns {Promise<Object>}
*/
async showModel(model) {
const response = await fetch(`${this.baseUrl}/api2/show`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name: model }),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
return response.json();
}
/**
* Generate embeddings
* @param {string} model - Model name
* @param {string|Array} prompt - Text or array of texts
* @returns {Promise<Array>}
*/
async embeddings(model, prompt) {
const response = await fetch(`${this.baseUrl}/api2/embeddings`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ model, prompt }),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.status}`);
}
const data = await response.json();
return data.embedding;
}
// ============================================
// HELPER METHODS FOR COMMON TASKS
// ============================================
/**
* Generate creative text
*/
async generateText(prompt, options = {}) {
return this.generate({
prompt,
temperature: 0.8,
...options,
});
}
/**
* Generate code
*/
async generateCode(prompt, language = 'javascript', options = {}) {
return this.generate({
prompt: `Scrivi codice ${language} per: ${prompt}\n\nRispondi solo con il codice, senza spiegazioni.`,
temperature: 0.3,
...options,
});
}
/**
* Translate text
*/
async translate(text, targetLang = 'english', options = {}) {
return this.generate({
prompt: `Traduci il seguente testo in ${targetLang}. Rispondi solo con la traduzione:\n\n${text}`,
temperature: 0.3,
...options,
});
}
/**
* Summarize text
*/
async summarize(text, options = {}) {
return this.generate({
prompt: `Riassumi il seguente testo in modo conciso:\n\n${text}`,
temperature: 0.5,
...options,
});
}
/**
* Answer a question based on a given context
*/
async answerQuestion(question, context, options = {}) {
return this.generate({
prompt: `Contesto:\n${context}\n\nDomanda: ${question}\n\nRispondi basandoti solo sul contesto fornito.`,
temperature: 0.3,
...options,
});
}
/**
* Extract structured information (JSON)
*/
async extractJSON(text, schema, options = {}) {
const response = await this.generate({
prompt: `Estrai le informazioni dal seguente testo e restituisci un JSON valido con questa struttura: ${JSON.stringify(schema)}\n\nTesto:\n${text}\n\nRispondi SOLO con il JSON, senza altro testo.`,
temperature: 0.1,
...options,
});
try {
// Try to extract a JSON object from the response
const jsonMatch = response.match(/\{[\s\S]*\}/);
if (jsonMatch) {
return JSON.parse(jsonMatch[0]);
}
return JSON.parse(response);
} catch (e) {
throw new Error('Failed to parse JSON response: ' + response);
}
}
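// Usage sketch (editor's illustration, hypothetical values):
//   const data = await ollama.extractJSON(
//     'Mario Rossi, mario@example.com, Milano',
//     { name: '', email: '', city: '' }
//   );
//   // → { name: 'Mario Rossi', email: 'mario@example.com', city: 'Milano' }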
/**
* Sentiment analysis
*/
async analyzeSentiment(text, options = {}) {
const response = await this.generate({
prompt: `Analizza il sentiment del seguente testo e rispondi SOLO con un JSON nel formato {"sentiment": "positive"|"negative"|"neutral", "confidence": 0.0-1.0, "explanation": "breve spiegazione"}\n\nTesto: ${text}`,
temperature: 0.1,
...options,
});
try {
const jsonMatch = response.match(/\{[\s\S]*\}/);
if (jsonMatch) {
return JSON.parse(jsonMatch[0]);
}
return JSON.parse(response);
} catch (e) {
return { sentiment: 'unknown', confidence: 0, raw: response };
}
}
/**
* Correct grammar
*/
async correctGrammar(text, language = 'italiano', options = {}) {
return this.generate({
prompt: `Correggi gli errori grammaticali nel seguente testo in ${language}. Rispondi solo con il testo corretto:\n\n${text}`,
temperature: 0.2,
...options,
});
}
/**
* Generate a list / bullet points
*/
async generateList(topic, count = 5, options = {}) {
return this.generate({
prompt: `Genera una lista di ${count} elementi su: ${topic}\n\nFormatta come lista puntata.`,
temperature: 0.7,
...options,
});
}
}
// Export for ES modules
export default OllamaService;
// Export for CommonJS (guarded: module is undefined in ESM contexts)
if (typeof module !== 'undefined' && module.exports) {
module.exports = OllamaService;
}
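A minimal sketch of using the service directly (editor's illustration; the model name is an assumption):

import OllamaService from './OllamaService.js';

async function demo() {
  const ollama = new OllamaService('http://localhost:11434');
  ollama.setDefaultModel('llama3.2');
  // onChunk fires for every parsed NDJSON line; the promise resolves
  // with the complete reply once the stream ends.
  const reply = await ollama.streamChat(
    { messages: [{ role: 'user', content: 'Ciao!' }], temperature: 0.7 },
    (chunk, full) => console.log(full)
  );
  console.log('final:', reply);
}
demo().catch(console.error);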

useOllama.js

@@ -0,0 +1,178 @@
/**
* useOllama - Vue 3 composable for using Ollama in your Quasar apps
*
* Usage example:
*
* import { useOllama } from './useOllama';
*
* const { generate, chat, isLoading, error } = useOllama({
* baseUrl: 'http://localhost:11434',
* model: 'llama3.2'
* });
*
* const result = await generate('Scrivi una poesia');
*/
import { ref } from 'vue';
import OllamaService from './OllamaService.js';
export function useOllama(options = {}) {
const {
baseUrl = 'http://localhost:11434',
model = 'llama3.2',
temperature = 0.7,
} = options;
// State
const isLoading = ref(false);
const error = ref(null);
const streamingContent = ref('');
const models = ref([]);
// Service instance
const service = new OllamaService(baseUrl);
service.setDefaultModel(model);
/**
* Generate text
*/
const generate = async (prompt, opts = {}) => {
isLoading.value = true;
error.value = null;
streamingContent.value = '';
try {
const result = await service.generate({
prompt,
temperature,
...opts,
});
return result;
} catch (e) {
error.value = e.message;
throw e;
} finally {
isLoading.value = false;
}
};
/**
* Generate text with streaming
*/
const generateStream = async (prompt, opts = {}) => {
isLoading.value = true;
error.value = null;
streamingContent.value = '';
try {
const result = await service.streamGenerate(
{ prompt, temperature, ...opts },
(chunk, full) => {
streamingContent.value = full;
}
);
return result;
} catch (e) {
error.value = e.message;
throw e;
} finally {
isLoading.value = false;
}
};
/**
* Chat with a message history
*/
const chat = async (messages, opts = {}) => {
isLoading.value = true;
error.value = null;
streamingContent.value = '';
try {
const result = await service.chat({
messages,
temperature,
...opts,
});
return result;
} catch (e) {
error.value = e.message;
throw e;
} finally {
isLoading.value = false;
}
};
/**
* Chat with streaming
*/
const chatStream = async (messages, opts = {}) => {
isLoading.value = true;
error.value = null;
streamingContent.value = '';
try {
const result = await service.streamChat(
{ messages, temperature, ...opts },
(chunk, full) => {
streamingContent.value = full;
}
);
return result;
} catch (e) {
error.value = e.message;
throw e;
} finally {
isLoading.value = false;
}
};
/**
* Load available models
*/
const loadModels = async () => {
try {
models.value = await service.listModels();
return models.value;
} catch (e) {
error.value = e.message;
return [];
}
};
// Helper methods
const generateText = (prompt, opts) => service.generateText(prompt, opts);
const generateCode = (prompt, lang, opts) => service.generateCode(prompt, lang, opts);
const translate = (text, lang, opts) => service.translate(text, lang, opts);
const summarize = (text, opts) => service.summarize(text, opts);
const extractJSON = (text, schema, opts) => service.extractJSON(text, schema, opts);
const analyzeSentiment = (text, opts) => service.analyzeSentiment(text, opts);
return {
// State
isLoading,
error,
streamingContent,
models,
// Core methods
generate,
generateStream,
chat,
chatStream,
loadModels,
// Helper methods
generateText,
generateCode,
translate,
summarize,
extractJSON,
analyzeSentiment,
// Service access
service,
};
}
export default useOllama;
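A sketch of the composable driving a live-typing UI (editor's illustration): streamingContent is a reactive ref, so a template can bind it directly while chatStream runs.

import { useOllama } from './useOllama';

const { chatStream, streamingContent, isLoading, error } = useOllama({
  model: 'llama3.2',
});

async function ask(question) {
  // streamingContent updates as chunks arrive; render it in the template
  // (e.g. {{ streamingContent }}) while isLoading is true.
  return chatStream([{ role: 'user', content: question }]);
}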