+1 -1
@@ -7,7 +7,7 @@ api:
 models:
   enrich: "lm_studio/qwen/qwen3-8b"
   embedding: "text-embedding-qwen3-embedding-8b"
-  retrieval: "lm_studio/qwen/qwen3-next-80b"
+  retrieval: "lm_studio/qwen/qwen3-30b-a3b-2507"

 # --- Ingestion Settings ---
 ingestion:
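The config change swaps only the retrieval model. For context, a minimal sketch of how a loader like config_loader.load_config might surface these keys; the file name config.yaml and the use of PyYAML are assumptions, since the repo's loader is not shown:

import yaml  # assumption: config is YAML and PyYAML is available

def load_config(path="config.yaml"):  # path is an assumption
    with open(path, "r") as fh:
        return yaml.safe_load(fh)

CFG = load_config()
print(CFG["models"]["retrieval"])  # -> "lm_studio/qwen/qwen3-30b-a3b-2507"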
+25 -15
@@ -4,12 +4,10 @@ from pathlib import Path
 from datetime import datetime
 import dspy
-from langchain_community.document_loaders import TextLoader
-from langchain_community.vectorstores import FAISS
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 from tqdm import tqdm
 from typing import List, Dict, Any


 from config_loader import load_config
 from embedding import LocalLMEmbeddings
 from experts.ingestion_agent import IngestionAgent
@@ -29,6 +27,7 @@ TIMEFILE = CFG["ingestion"]["time_file_location"]

 RIGHT_NOW = datetime.now().isoformat()

+
 def load_documents(last_update_time):
     docs = []
     data_path = Path(DATA_DIR)
@@ -59,10 +58,12 @@ def load_documents(last_update_time):
     print(f"✅ Loaded: {len(docs)} Files")
     return docs

+
 def normalize_path(path_str):
     """Convert string path to normalized absolute path."""
     return str(Path(path_str).resolve())

+
 def chunk_documents(docs):
     # LangChain preserves metadata during splitting automatically
     text_splitter = RecursiveCharacterTextSplitter(
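The comment in chunk_documents relies on real LangChain behavior: split_documents copies each source Document's metadata onto every chunk it produces. A self-contained sketch; the chunk size and overlap values are assumptions, since the script's actual splitter arguments fall outside the hunk:

from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter

doc = Document(
    page_content="word " * 1000,  # dummy text long enough to split
    metadata={"source": "note.md", "full_path": "/notes/note.md"},
)
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
chunks = splitter.split_documents([doc])
# every chunk inherits the parent Document's metadata
assert all(c.metadata["full_path"] == "/notes/note.md" for c in chunks)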
@@ -90,7 +91,6 @@ def enrich_chunks(chunks: list) -> list:
         chunk.metadata.update(metadata)
         return (idx, chunk)

-
     enriched_results = []
     with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
         # Wrap chunks in enumerate to keep track of order
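The enumerate-plus-index-return pattern exists because ThreadPoolExecutor completes work in nondeterministic order. A minimal sketch of the same idea; enrich_one is a hypothetical stand-in for the IngestionAgent call:

from concurrent.futures import ThreadPoolExecutor, as_completed

def enrich_one(idx, chunk):
    # hypothetical stand-in for the agent's metadata enrichment
    return (idx, chunk)

def enrich_in_order(chunks, max_workers=4):
    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(enrich_one, i, c) for i, c in enumerate(chunks)]
        for fut in as_completed(futures):
            results.append(fut.result())  # arrives in completion order
    results.sort(key=lambda pair: pair[0])  # restore submission order
    return [chunk for _, chunk in results]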
@@ -163,7 +163,7 @@ def embed_chunks(chunks: List[Any], batch_size: int = EMBEDDING_BATCH_SIZE) -> L
                     "entities": entities,
                     "embedding": embedding,
                     "timestamp": RIGHT_NOW,
-                    "original_index": i + j # Track original position
+                    "original_index": i + j, # Track original position
                 }

                 embedded_chunks.append(chunk_data)
@@ -192,13 +192,14 @@ def embed_chunks(chunks: List[Any], batch_size: int = EMBEDDING_BATCH_SIZE) -> L
                         "entities": entities,
                         "embedding": embedding,
                         "timestamp": RIGHT_NOW,
-                        "original_index": i + j
+                        "original_index": i + j,
                     }
                     embedded_chunks.append(chunk_data)

                except Exception as inner_e:
                    print(f"❌ Failed to embed individual chunk {i + j}: {inner_e}")
-                    embedded_chunks.append({
+                    embedded_chunks.append(
+                        {
                             "file_path": normalize_path(chunk.metadata.get("full_path", "unknown")),
                             "file_name": chunk.metadata.get("source", "unknown"),
                             "chunk_data": content,
@@ -207,20 +208,22 @@ def embed_chunks(chunks: List[Any], batch_size: int = EMBEDDING_BATCH_SIZE) -> L
                             "entities": [],
                             "embedding": [],
                             "timestamp": RIGHT_NOW,
-                            "original_index": i + j
-                    })
+                            "original_index": i + j,
+                        }
+                    )

     print(f"✅ Completed embedding {len(embedded_chunks)} chunks in batches of {batch_size}.")
     return embedded_chunks

+
 def save_to_db(chunk_dicts):
     """
     Save a list of dictionaries to the Turso database.
     Each dict maps to a row in the 'notes' table.
     """
-    print('connecting to db')
+    print("connecting to db")
     con = turso.connect(DATABASE_PATH)
-    print('opening cursor')
+    print("opening cursor")
     cur = con.cursor()

     # SQL with named placeholders for clarity and safety
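Across these hunks, embed_chunks is batch-first with a per-chunk fallback, so one bad chunk degrades to an empty embedding instead of failing its whole batch. A condensed sketch of that control flow; embed_fn stands in for the LocalLMEmbeddings call and the record fields are trimmed:

def embed_in_batches(texts, embed_fn, batch_size=32):
    embedded_chunks = []
    for i in range(0, len(texts), batch_size):
        batch = texts[i : i + batch_size]
        try:
            vectors = embed_fn(batch)  # happy path: embed the whole batch
        except Exception:
            vectors = []
            for j, text in enumerate(batch):
                try:
                    vectors.append(embed_fn([text])[0])  # retry one at a time
                except Exception as inner_e:
                    print(f"❌ Failed to embed individual chunk {i + j}: {inner_e}")
                    vectors.append([])  # placeholder keeps positions aligned
        for j, vec in enumerate(vectors):
            embedded_chunks.append({"embedding": vec, "original_index": i + j})
    return embedded_chunks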
@@ -236,7 +239,8 @@ def save_to_db(chunk_dicts):
         # Convert list of floats to comma-separated string for Turso vector32
         embedding_str = str(entry["embedding"])

-        batch_data.append((
+        batch_data.append(
+            (
                 entry["file_path"],
                 entry["file_name"],
                 entry["chunk_data"],
@@ -244,9 +248,10 @@ def save_to_db(chunk_dicts):
                 ",".join(entry["tags"]), # Store as comma-separated string
                 ",".join(entry["entities"]), # Store as comma-separated string
                 embedding_str,
-                entry["timestamp"]
-        ))
-    print('data to insert:',len(batch_data))
+                entry["timestamp"],
+            )
+        )
+    print("data to insert:", len(batch_data))
     # Execute batch insert
     cur.executemany(insert_sql, batch_data)
     con.commit()
@@ -254,6 +259,7 @@ def save_to_db(chunk_dicts):

     print(f"✅ Saved {len(batch_data)} chunks to database.")

+
 def create_db():
     con = turso.connect(DATABASE_PATH)
     cur = con.cursor()
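The insert itself is a single executemany over pre-built tuples, one round trip for the whole batch. A self-contained sketch of the pattern, using sqlite3 as a stand-in for the turso client; the column list is illustrative, and the diff elides at least one field between chunk_data and tags:

import sqlite3

con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.execute(
    "CREATE TABLE notes (file_path TEXT, file_name TEXT, chunk_data TEXT, "
    "tags TEXT, entities TEXT, embedding TEXT, timestamp TEXT)"
)
insert_sql = "INSERT INTO notes VALUES (?, ?, ?, ?, ?, ?, ?)"
batch_data = [
    ("/notes/a.md", "a.md", "chunk text", "tag1,tag2", "Alice,Bob",
     "[0.1, 0.2, 0.3]", "2025-01-01T00:00:00"),
]
cur.executemany(insert_sql, batch_data)  # one statement, many rows
con.commit()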
@@ -283,6 +289,7 @@ def create_db():
     con.close()
     print("✅ Database and indexes created.")

+
 def get_last_update_time():
     try:
         with open(TIMEFILE, "r") as file:
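create_db's DDL is outside the diff, but the "Turso vector32" comment earlier suggests libsql's native vector support (F32_BLOB columns, vector32() conversion, libsql_vector_idx indexes). A hedged sketch of what the schema could look like; the column names, the 4096 dimension, and the index name are all assumptions, not the repo's DDL:

# Hedged sketch of a libsql/Turso vector schema; everything here is assumed.
schema_sql = """
CREATE TABLE IF NOT EXISTS notes (
    id INTEGER PRIMARY KEY,
    file_path TEXT,
    file_name TEXT,
    chunk_data TEXT,
    tags TEXT,
    entities TEXT,
    embedding F32_BLOB(4096),  -- dimension is an assumption
    timestamp TEXT
)
"""
# libsql vector index; inserts would then wrap the value in vector32(?)
index_sql = "CREATE INDEX IF NOT EXISTS notes_embedding_idx ON notes(libsql_vector_idx(embedding))"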
@@ -293,6 +300,7 @@ def get_last_update_time():
         last_update = datetime(year=2000, month=1, day=1)
     return last_update

+
 def update_timefile():
     current_time = datetime.now()
     current_time_str = current_time.strftime("%Y/%m/%d - %H:%M:%S")
@@ -300,6 +308,7 @@ def update_timefile():
         file.write(current_time_str)
     return current_time_str

+
 def delete_from_db(embedded_chunks):
     """
     Delete existing rows from the 'notes' table where the file_path matches
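These two functions give the script its incremental behavior: a plain-text timefile written with strftime and read back with the matching strptime, defaulting to 2000-01-01 so a missing or unreadable file triggers a full ingest. A self-contained round-trip sketch; the path is an assumption, since the script takes TIMEFILE from config:

from datetime import datetime

FMT = "%Y/%m/%d - %H:%M:%S"  # format used by update_timefile

def update_timefile(path="last_update.txt"):
    stamp = datetime.now().strftime(FMT)
    with open(path, "w") as fh:
        fh.write(stamp)
    return stamp

def get_last_update_time(path="last_update.txt"):
    try:
        with open(path, "r") as fh:
            return datetime.strptime(fh.read().strip(), FMT)
    except (OSError, ValueError):
        # no timefile yet, or unparseable: reprocess everything
        return datetime(year=2000, month=1, day=1)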
@@ -337,7 +346,6 @@ def delete_from_db(embedded_chunks):
     con.close()


-
 def main():
     create_db()
     last_update_time = get_last_update_time()
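delete_from_db is what makes re-ingestion idempotent: before fresh chunks are inserted, existing rows for the same file_path are dropped. A sketch of that step, again with sqlite3 standing in for the turso client:

import sqlite3

def delete_from_db(con: sqlite3.Connection, embedded_chunks: list) -> None:
    cur = con.cursor()
    # dedupe paths so each re-ingested file is deleted exactly once
    paths = {chunk["file_path"] for chunk in embedded_chunks}
    cur.executemany("DELETE FROM notes WHERE file_path = ?", [(p,) for p in paths])
    con.commit()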
@@ -366,5 +374,7 @@ def main():
     updated = update_timefile()
     print(f"Updated timefile to: {updated}")

+
 if __name__ == "__main__":
     main()
+
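Read end to end, main() is a linear pipeline. The flow below is reconstructed from the function names visible in this diff; the exact arguments passed between steps are inferred:

def main():
    create_db()                                # ensure table and indexes exist
    last_update_time = get_last_update_time()  # timefile, or 2000-01-01 fallback
    docs = load_documents(last_update_time)    # only files changed since then
    chunks = chunk_documents(docs)             # RecursiveCharacterTextSplitter
    enriched = enrich_chunks(chunks)           # threaded tag/entity enrichment
    embedded = embed_chunks(enriched)          # batched embeddings with fallback
    delete_from_db(embedded)                   # drop stale rows for these files
    save_to_db(embedded)                       # executemany insert into 'notes'
    updated = update_timefile()
    print(f"Updated timefile to: {updated}")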