2026-02-11 07:32:38 +00:00
parent 8d0a74e865
commit 09508931af
2 changed files with 54 additions and 44 deletions
+1 -1
@@ -7,7 +7,7 @@ api:
 models:
   enrich: "lm_studio/qwen/qwen3-8b"
   embedding: "text-embedding-qwen3-embedding-8b"
-  retrieval: "lm_studio/qwen/qwen3-next-80b"
+  retrieval: "lm_studio/qwen/qwen3-30b-a3b-2507"

 # --- Ingestion Settings ---
 ingestion:
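
Side note: the ingest script below reads this file through config_loader.load_config, which is not part of this commit. A minimal sketch of what such a loader looks like, assuming a thin PyYAML wrapper (the key path within CFG is inferred from this hunk and may actually sit under api:):

# Hypothetical stand-in for config_loader.load_config (not in this commit).
from pathlib import Path
import yaml  # assumes PyYAML is a dependency

def load_config(path: str = "config.yaml") -> dict:
    """Parse the YAML config into a plain dict."""
    with open(Path(path), "r", encoding="utf-8") as fh:
        return yaml.safe_load(fh)

CFG = load_config()
CFG["models"]["retrieval"]  # -> "lm_studio/qwen/qwen3-30b-a3b-2507" after this commit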
+25 -15
@@ -4,12 +4,10 @@ from pathlib import Path
 from datetime import datetime
 import dspy
 from langchain_community.document_loaders import TextLoader
-from langchain_community.vectorstores import FAISS
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 from tqdm import tqdm
 from typing import List, Dict, Any
 from config_loader import load_config
 from embedding import LocalLMEmbeddings
 from experts.ingestion_agent import IngestionAgent
@@ -29,6 +27,7 @@ TIMEFILE = CFG["ingestion"]["time_file_location"]
 RIGHT_NOW = datetime.now().isoformat()
+
 def load_documents(last_update_time):
     docs = []
     data_path = Path(DATA_DIR)
@@ -59,10 +58,12 @@ def load_documents(last_update_time):
print(f"✅ Loaded: {len(docs)} Files") print(f"✅ Loaded: {len(docs)} Files")
return docs return docs
def normalize_path(path_str): def normalize_path(path_str):
"""Convert string path to normalized absolute path.""" """Convert string path to normalized absolute path."""
return str(Path(path_str).resolve()) return str(Path(path_str).resolve())
def chunk_documents(docs): def chunk_documents(docs):
# LangChain preserves metadata during splitting automatically # LangChain preserves metadata during splitting automatically
text_splitter = RecursiveCharacterTextSplitter( text_splitter = RecursiveCharacterTextSplitter(
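
As the comment in this hunk notes, LangChain's splitter copies each parent document's metadata onto every chunk. A minimal usage sketch of that call pattern; the size values here are placeholders, since the real ones come from the ingestion config:

from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,    # placeholder; actual value comes from the config
    chunk_overlap=100,  # placeholder as well
)
chunks = splitter.split_documents(docs)
# each chunk keeps its parent Document's metadata (source, full_path, ...)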
@@ -90,7 +91,6 @@ def enrich_chunks(chunks: list) -> list:
         chunk.metadata.update(metadata)
         return (idx, chunk)
-
     enriched_results = []
     with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
         # Wrap chunks in enumerate to keep track of order
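
The enumerate wrapper and the (idx, chunk) return value exist because a thread pool completes work out of order; the indices let the caller re-sort afterwards. A condensed sketch of that pattern (the worker and function names here are illustrative, not the file's exact code):

from concurrent.futures import ThreadPoolExecutor, as_completed

def enrich_in_order(chunks, enrich_one, max_workers=8):
    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(enrich_one, idx, chunk)
                   for idx, chunk in enumerate(chunks)]
        for future in as_completed(futures):  # completion order, not input order
            results.append(future.result())   # each result is (idx, chunk)
    results.sort(key=lambda pair: pair[0])    # restore original order
    return [chunk for _, chunk in results]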
@@ -163,7 +163,7 @@ def embed_chunks(chunks: List[Any], batch_size: int = EMBEDDING_BATCH_SIZE) -> L
"entities": entities, "entities": entities,
"embedding": embedding, "embedding": embedding,
"timestamp": RIGHT_NOW, "timestamp": RIGHT_NOW,
"original_index": i + j # Track original position "original_index": i + j, # Track original position
} }
embedded_chunks.append(chunk_data) embedded_chunks.append(chunk_data)
@@ -192,13 +192,14 @@ def embed_chunks(chunks: List[Any], batch_size: int = EMBEDDING_BATCH_SIZE) -> L
"entities": entities, "entities": entities,
"embedding": embedding, "embedding": embedding,
"timestamp": RIGHT_NOW, "timestamp": RIGHT_NOW,
"original_index": i + j "original_index": i + j,
} }
embedded_chunks.append(chunk_data) embedded_chunks.append(chunk_data)
except Exception as inner_e: except Exception as inner_e:
print(f"❌ Failed to embed individual chunk {i + j}: {inner_e}") print(f"❌ Failed to embed individual chunk {i + j}: {inner_e}")
embedded_chunks.append({ embedded_chunks.append(
{
"file_path": normalize_path(chunk.metadata.get("full_path", "unknown")), "file_path": normalize_path(chunk.metadata.get("full_path", "unknown")),
"file_name": chunk.metadata.get("source", "unknown"), "file_name": chunk.metadata.get("source", "unknown"),
"chunk_data": content, "chunk_data": content,
@@ -207,20 +208,22 @@ def embed_chunks(chunks: List[Any], batch_size: int = EMBEDDING_BATCH_SIZE) -> L
"entities": [], "entities": [],
"embedding": [], "embedding": [],
"timestamp": RIGHT_NOW, "timestamp": RIGHT_NOW,
"original_index": i + j "original_index": i + j,
}) }
)
print(f"✅ Completed embedding {len(embedded_chunks)} chunks in batches of {batch_size}.") print(f"✅ Completed embedding {len(embedded_chunks)} chunks in batches of {batch_size}.")
return embedded_chunks return embedded_chunks
+
 def save_to_db(chunk_dicts):
     """
     Save a list of dictionaries to the Turso database.
     Each dict maps to a row in the 'notes' table.
     """
-    print('connecting to db')
+    print("connecting to db")
     con = turso.connect(DATABASE_PATH)
-    print('opening cursor')
+    print("opening cursor")
     cur = con.cursor()

     # SQL with named placeholders for clarity and safety
@@ -236,7 +239,8 @@ def save_to_db(chunk_dicts):
         # Convert list of floats to comma-separated string for Turso vector32
         embedding_str = str(entry["embedding"])
-        batch_data.append((
+        batch_data.append(
+            (
                 entry["file_path"],
                 entry["file_name"],
                 entry["chunk_data"],
@@ -244,9 +248,10 @@ def save_to_db(chunk_dicts):
",".join(entry["tags"]), # Store as comma-separated string ",".join(entry["tags"]), # Store as comma-separated string
",".join(entry["entities"]), # Store as comma-separated string ",".join(entry["entities"]), # Store as comma-separated string
embedding_str, embedding_str,
entry["timestamp"] entry["timestamp"],
)) )
print('data to insert:',len(batch_data)) )
print("data to insert:", len(batch_data))
# Execute batch insert # Execute batch insert
cur.executemany(insert_sql, batch_data) cur.executemany(insert_sql, batch_data)
con.commit() con.commit()
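
On the vector32 comment above: str(entry["embedding"]) produces a bracketed string like "[0.1, 0.2, 0.3]", which libSQL/Turso can convert into a native vector with its vector32() SQL function at insert time. A self-contained sketch of that storage path under those assumptions; the table name, column type, and insert statement here are illustrative, since the file's actual insert_sql is outside this hunk:

import turso  # the module the file itself uses for connect()/cursors

con = turso.connect("demo.db")
cur = con.cursor()
# F32_BLOB(n) is libSQL's fixed-dimension float32 vector column type.
cur.execute("CREATE TABLE IF NOT EXISTS demo (id INTEGER PRIMARY KEY, v F32_BLOB(3))")

rows = [(str([0.1, 0.2, 0.3]),), (str([0.4, 0.5, 0.6]),)]
cur.executemany("INSERT INTO demo (v) VALUES (vector32(?))", rows)
con.commit()
con.close()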
@@ -254,6 +259,7 @@ def save_to_db(chunk_dicts):
print(f"✅ Saved {len(batch_data)} chunks to database.") print(f"✅ Saved {len(batch_data)} chunks to database.")
def create_db(): def create_db():
con = turso.connect(DATABASE_PATH) con = turso.connect(DATABASE_PATH)
cur = con.cursor() cur = con.cursor()
@@ -283,6 +289,7 @@ def create_db():
     con.close()
     print("✅ Database and indexes created.")

+
 def get_last_update_time():
     try:
         with open(TIMEFILE, "r") as file:
@@ -293,6 +300,7 @@ def get_last_update_time():
         last_update = datetime(year=2000, month=1, day=1)
     return last_update

+
 def update_timefile():
     current_time = datetime.now()
     current_time_str = current_time.strftime("%Y/%m/%d - %H:%M:%S")
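
The strftime format written here has to agree with the strptime call in get_last_update_time, whose parse line falls outside the hunk above. A condensed round-trip sketch, with the year-2000 fallback taken straight from the diff (the path argument is illustrative; the real one is TIMEFILE from the config):

from datetime import datetime

TIME_FMT = "%Y/%m/%d - %H:%M:%S"

def write_timefile(path):
    with open(path, "w") as fh:
        fh.write(datetime.now().strftime(TIME_FMT))

def read_timefile(path):
    try:
        with open(path, "r") as fh:
            return datetime.strptime(fh.read().strip(), TIME_FMT)
    except (FileNotFoundError, ValueError):
        return datetime(year=2000, month=1, day=1)  # fallback shown in the diff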
@@ -300,6 +308,7 @@ def update_timefile():
         file.write(current_time_str)
     return current_time_str

+
 def delete_from_db(embedded_chunks):
     """
     Delete existing rows from the 'notes' table where the file_path matches
@@ -337,7 +346,6 @@ def delete_from_db(embedded_chunks):
     con.close()
-

 def main():
     create_db()
     last_update_time = get_last_update_time()
@@ -366,5 +374,7 @@ def main():
     updated = update_timefile()
     print(f"Updated timefile to: {updated}")
+
+
 if __name__ == "__main__":
     main()
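
For orientation, the functions touched in this file compose into the pipeline below. Only the top and bottom of main() appear in these hunks, so the middle steps are inferred from the function names and signatures, not copied from the file:

def main():
    create_db()
    last_update_time = get_last_update_time()
    docs = load_documents(last_update_time)  # only files newer than the timefile
    chunks = chunk_documents(docs)
    chunks = enrich_chunks(chunks)           # tags/entities via the dspy IngestionAgent
    embedded = embed_chunks(chunks)
    delete_from_db(embedded)                 # drop stale rows for re-ingested files
    save_to_db(embedded)
    updated = update_timefile()
    print(f"Updated timefile to: {updated}")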