Fixed check for incomplete-response errors (non-"stop" finish reasons) from OpenRouter - version 1.1.3

main
PeninsulaInd 2025-10-31 11:49:45 -05:00
parent de6b97dbc1
commit 14df158a7a
3 changed files with 30 additions and 13 deletions

View File

@ -46,7 +46,8 @@ class AIClient:
max_tokens: int = 4000, max_tokens: int = 4000,
temperature: float = 0.7, temperature: float = 0.7,
json_mode: bool = False, json_mode: bool = False,
override_model: Optional[str] = None override_model: Optional[str] = None,
title: Optional[str] = None
) -> str: ) -> str:
""" """
Generate completion from OpenRouter API Generate completion from OpenRouter API
@ -60,7 +61,7 @@ class AIClient:
override_model: If provided, use this model instead of self.model override_model: If provided, use this model instead of self.model
Returns: Returns:
Generated text completion Tuple of (generated text completion, finish_reason)
""" """
messages = [] messages = []
if system_message: if system_message:
@ -86,9 +87,15 @@ class AIClient:
try: try:
response = self.client.chat.completions.create(**kwargs) response = self.client.chat.completions.create(**kwargs)
content = response.choices[0].message.content or "" content = response.choices[0].message.content or ""
finish_reason = response.choices[0].finish_reason
if finish_reason != "stop":
title_str = title if title else "N/A"
print(f"{title_str} - {finish_reason} - {model_to_use}")
if json_mode: if json_mode:
print(f"[DEBUG] AI Response (first 200 chars): {content[:200]}") print(f"[DEBUG] AI Response (first 200 chars): {content[:200]}")
return content return content, finish_reason
except RateLimitError as e: except RateLimitError as e:
if attempt < retries - 1: if attempt < retries - 1:

View File

@ -47,6 +47,7 @@ class BatchProcessor:
"generated_articles": 0, "generated_articles": 0,
"augmented_articles": 0, "augmented_articles": 0,
"failed_articles": 0, "failed_articles": 0,
"articles_with_error": 0,
"tier1_time": 0.0, "tier1_time": 0.0,
"tier2_time": 0.0, "tier2_time": 0.0,
"tier3_time": 0.0, "tier3_time": 0.0,
@ -319,7 +320,7 @@ class BatchProcessor:
click.echo(f"{prefix} Generated outline: {h2_count} H2s, {h3_count} H3s") click.echo(f"{prefix} Generated outline: {h2_count} H2s, {h3_count} H3s")
click.echo(f"{prefix} Generating content...") click.echo(f"{prefix} Generating content...")
content = self.generator.generate_content( content, finish_reason = self.generator.generate_content(
project_id=project_id, project_id=project_id,
title=title, title=title,
outline=outline, outline=outline,
@ -329,6 +330,9 @@ class BatchProcessor:
model=models.content if models else None model=models.content if models else None
) )
if finish_reason != "stop":
self.stats["articles_with_error"] += 1
word_count = self.generator.count_words(content) word_count = self.generator.count_words(content)
click.echo(f"{prefix} Generated content: {word_count:,} words") click.echo(f"{prefix} Generated content: {word_count:,} words")
@ -508,7 +512,7 @@ class BatchProcessor:
click.echo(f"{prefix} Generated outline: {h2_count} H2s, {h3_count} H3s") click.echo(f"{prefix} Generated outline: {h2_count} H2s, {h3_count} H3s")
click.echo(f"{prefix} Generating content...") click.echo(f"{prefix} Generating content...")
content = thread_generator.generate_content( content, finish_reason = thread_generator.generate_content(
project_id=project_id, project_id=project_id,
title=title, title=title,
outline=outline, outline=outline,
@ -518,6 +522,10 @@ class BatchProcessor:
model=models.content if models else None model=models.content if models else None
) )
if finish_reason != "stop":
with self.stats_lock:
self.stats["articles_with_error"] += 1
word_count = thread_generator.count_words(content) word_count = thread_generator.count_words(content)
click.echo(f"{prefix} Generated content: {word_count:,} words") click.echo(f"{prefix} Generated content: {word_count:,} words")
@ -731,6 +739,7 @@ class BatchProcessor:
click.echo(f"Articles generated: {self.stats['generated_articles']}/{self.stats['total_articles']}") click.echo(f"Articles generated: {self.stats['generated_articles']}/{self.stats['total_articles']}")
click.echo(f"Augmented: {self.stats['augmented_articles']}") click.echo(f"Augmented: {self.stats['augmented_articles']}")
click.echo(f"Failed: {self.stats['failed_articles']}") click.echo(f"Failed: {self.stats['failed_articles']}")
click.echo(f"Articles With Error From OpenRouter: {self.stats['articles_with_error']}")
click.echo("") click.echo("")
click.echo("TIMING") click.echo("TIMING")
click.echo("-" * 60) click.echo("-" * 60)

View File

@ -58,7 +58,7 @@ class ContentGenerator:
related_searches=related_str related_searches=related_str
) )
title = self.ai_client.generate_completion( title, _ = self.ai_client.generate_completion(
prompt=user_prompt, prompt=user_prompt,
system_message=system_msg, system_message=system_msg,
max_tokens=100, max_tokens=100,
@ -120,7 +120,7 @@ class ContentGenerator:
batch_titles = None batch_titles = None
for attempt in range(3): for attempt in range(3):
try: try:
response = self.ai_client.generate_completion( response, _ = self.ai_client.generate_completion(
prompt=user_prompt, prompt=user_prompt,
system_message=system_msg, system_message=system_msg,
max_tokens=100 * current_batch_size, max_tokens=100 * current_batch_size,
@ -216,7 +216,7 @@ class ContentGenerator:
related_searches=related_str related_searches=related_str
) )
outline_json = self.ai_client.generate_completion( outline_json, _ = self.ai_client.generate_completion(
prompt=user_prompt, prompt=user_prompt,
system_message=system_msg, system_message=system_msg,
max_tokens=2000, max_tokens=2000,
@ -283,7 +283,7 @@ class ContentGenerator:
model: Optional model override for this generation stage model: Optional model override for this generation stage
Returns: Returns:
HTML string with <h2>, <h3>, <p> tags Tuple of (HTML string with <h2>, <h3>, <p> tags, finish_reason)
""" """
project = self.project_repo.get_by_id(project_id) project = self.project_repo.get_by_id(project_id)
if not project: if not project:
@ -311,12 +311,13 @@ class ContentGenerator:
words_per_section=words_per_section words_per_section=words_per_section
) )
content = self.ai_client.generate_completion( content, finish_reason = self.ai_client.generate_completion(
prompt=user_prompt, prompt=user_prompt,
system_message=system_msg, system_message=system_msg,
max_tokens=12000, max_tokens=12000,
temperature=0.7, temperature=0.7,
override_model=model override_model=model,
title=title
) )
content = content.strip() content = content.strip()
@ -327,7 +328,7 @@ class ContentGenerator:
project_id, "content", content, "html" project_id, "content", content, "html"
) )
return content return content, finish_reason
def validate_word_count(self, content: str, min_words: int, max_words: int) -> Tuple[bool, int]: def validate_word_count(self, content: str, min_words: int, max_words: int) -> Tuple[bool, int]:
""" """
@ -387,7 +388,7 @@ class ContentGenerator:
target_word_count=target_word_count target_word_count=target_word_count
) )
augmented = self.ai_client.generate_completion( augmented, _ = self.ai_client.generate_completion(
prompt=user_prompt, prompt=user_prompt,
system_message=system_msg, system_message=system_msg,
max_tokens=8000, max_tokens=8000,