fix: colors
@@ -3,6 +3,7 @@ EMBEDDING_MODEL=mxbai-embed-large:latest
 LLM_MODEL=qwen2.5:7b-instruct-q8_0
 OLLAMA_BASE_URL=http://localhost:11434
 
+ANSWER_COLOR=purple
 SYSTEM_PROMPT="You are a precise technical assistant. Cite sources using [filename]. Be concise."
 
 USER_PROMPT_TEMPLATE="Previous Conversation:
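
Note: the new ANSWER_COLOR setting is picked up by main.py below via os.getenv with a "blue" fallback, so this env-file line switches the answer text to purple. A minimal sketch of that lookup, assuming python-dotenv (load_dotenv() appears in the next hunk):

```python
# Minimal sketch: how an override in the env file reaches the app.
# load_dotenv() is from python-dotenv, matching the call in main.py.
import os
from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from ./.env into the environment
ANSWER_COLOR = os.getenv("ANSWER_COLOR", "blue")  # "purple" once set above
print(ANSWER_COLOR)
```
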
main.py (17)
@@ -36,6 +36,7 @@ load_dotenv()
 style = Style.from_dict({"prompt": "bold #6a0dad"})
 
 OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+ANSWER_COLOR = os.getenv("ANSWER_COLOR", "blue")
 
 SYSTEM_PROMPT_SEARCH = os.getenv("SYSTEM_PROMPT", "You are a precise technical assistant. Cite sources using [filename]. Be concise.")
 SYSTEM_PROMPT_ANALYSIS = (
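
Note: ANSWER_COLOR is consumed as a Rich style name in the streaming hunk at line 366 below, so any style string Rich accepts will work. A quick sanity check, assuming the rich package this file already uses for console:

```python
# Quick check that a color name such as "purple" is a valid Rich style.
from rich.console import Console

console = Console()
console.print("sample answer text", style="purple")  # renders in purple
```
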
@@ -49,7 +50,7 @@ SYSTEM_PROMPT_ANALYSIS = (
 USER_PROMPT_TEMPLATE = os.getenv("USER_PROMPT_TEMPLATE",
     "Previous Conversation:\n{history}\n\nContext from Docs:\n{context}\n\nCurrent Question: {question}")
 
-MD_DIRECTORY = os.getenv("MD_FOLDER", "./my_docs")
+MD_DIRECTORY = os.getenv("MD_FOLDER", "./notes")
 EMBEDDING_MODEL = os.getenv("EMBEDDING_MODEL", "nomic-embed-text")
 LLM_MODEL = os.getenv("LLM_MODEL", "llama3")
 
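
Note: the template's {history}, {context}, and {question} placeholders mirror the keys streamed into the chain in the hunk at line 366. How main.py fills them (str.format or a prompt-template class) is not visible in this diff; a hedged sketch with plain str.format:

```python
# Hedged sketch: filling the template placeholders. The sample values
# are made up, and str.format is an assumption about the mechanism.
template = ("Previous Conversation:\n{history}\n\n"
            "Context from Docs:\n{context}\n\n"
            "Current Question: {question}")
print(template.format(history="(none)",
                      context="[notes.md] ...",
                      question="What is RAG?"))
```
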
@@ -91,7 +92,7 @@ def classify_intent(query: str) -> str:
         r"what have i learned", r"summary of (my )?notes",
         r"my progress", r"learning path", r"knowledge gap",
         r"оцени (мой )?прогресс", r"что я выучил", r"итоги", r"анализ знаний",
-        r"сегодня урок", r"что я изучил"
+        r"сегодня(?:\s+\w+)*\s*урок", r"что я изучил"
     ]
 
     query_lower = query.lower()
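
Note: the regex loosening is the substantive change here. The old r"сегодня урок" ("today lesson") required the two words to be adjacent, so a natural phrase like "сегодня был урок" ("today there was a lesson") never matched; the new pattern allows words in between. A quick demonstration:

```python
# Shows why the pattern was loosened: Russian word order usually puts
# other words between "сегодня" (today) and "урок" (lesson).
import re

old = re.compile(r"сегодня урок")
new = re.compile(r"сегодня(?:\s+\w+)*\s*урок")

q = "сегодня был урок по python"  # "today there was a python lesson"
print(bool(old.search(q)))  # False: "был" breaks the required adjacency
print(bool(new.search(q)))  # True: intervening words now match
```
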
@@ -286,7 +287,7 @@ async def main():
     processor = ChunkProcessor(vectorstore)
     cache = load_hash_cache()
 
-    console.print("Checking documents...", style="yellow")
+    # Checking documents
     files = [
         os.path.join(root, file)
         for root, _, files in os.walk(MD_DIRECTORY)
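
Note: the file scan is a plain os.walk list comprehension; the hunk cuts off before the filename filter, so the condition below is an assumption, not code from this commit:

```python
# Hedged reconstruction of the file-collection idiom above; the
# .md filter is assumed, since the hunk ends before the if-clause.
import os

MD_DIRECTORY = "./notes"
files = [
    os.path.join(root, file)
    for root, _, files in os.walk(MD_DIRECTORY)
    for file in files
    if file.endswith(".md")  # assumed filter, not visible in the diff
]
print(files)
```
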
@@ -306,17 +307,17 @@ async def main():
     observer = start_watcher(processor, cache)
     memory = ConversationMemory()
 
-    console.print("💬 Ready! Type 'exit' to quit.", style="bold green")
     try:
         while True:
             query = await session.prompt_async("> ", style=style)
             query = query.strip()
             if query.lower() in {"exit", "quit", "q"}:
-                console.print("Goodbye!", style="yellow")
+                console.print("\nGoodbye!", style="yellow")
                 break
             if not query: continue
 
+            console.print()
 
             mode = classify_intent(query)
             history_str = memory.get_history()
 
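
Note: the loop pairs prompt_toolkit's async prompt with Rich output, and the added console.print() inserts a blank line between the question and the answer block. A stripped-down sketch of that REPL shape, under the assumption that session is a PromptSession created earlier in main():

```python
# Stripped-down sketch of the REPL loop shape used in this hunk:
# prompt_toolkit for async input, rich for styled output.
import asyncio
from prompt_toolkit import PromptSession
from prompt_toolkit.styles import Style
from rich.console import Console

console = Console()
style = Style.from_dict({"prompt": "bold #6a0dad"})

async def repl():
    session = PromptSession()
    while True:
        query = (await session.prompt_async("> ", style=style)).strip()
        if query.lower() in {"exit", "quit", "q"}:
            console.print("\nGoodbye!", style="yellow")
            break
        if not query:
            continue
        console.print()  # blank line before the answer

asyncio.run(repl())
```
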
@@ -366,7 +367,7 @@ async def main():
                 "question": query,
                 "history": history_str
             }):
-                print(chunk, end="")
+                console.print(chunk, end="", style=ANSWER_COLOR)
                 response += chunk
             console.print("\n")
 
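
Note: this is the change the commit title refers to: the plain print(chunk, end="") becomes console.print(chunk, end="", style=ANSWER_COLOR), so streamed tokens render in the configured color while end="" keeps them on one line. A small sketch with a stand-in chunk source:

```python
# Sketch of the streaming-output change: each chunk is styled, end=""
# keeps chunks joined, and a final newline closes the answer block.
from rich.console import Console

console = Console()
ANSWER_COLOR = "purple"  # stand-in for os.getenv("ANSWER_COLOR", "blue")

response = ""
for chunk in ["Retrieval-", "augmented ", "generation."]:  # stand-in for chain.astream(...)
    console.print(chunk, end="", style=ANSWER_COLOR)
    response += chunk
console.print("\n")
```
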
@@ -385,5 +386,5 @@ if __name__ == "__main__":
         loop = asyncio.get_event_loop()
         loop.run_until_complete(main())
     except KeyboardInterrupt:
-        console.print("Goodbye!", style="yellow")
+        console.print("\nGoodbye!", style="yellow")
         sys.exit(0)