20 changes: 13 additions & 7 deletions neurons/validator.py
@@ -50,10 +50,10 @@ async def spawn_loops(task_queue, scoring_queue, reward_events, miners_dict):
     logger.info("Starting loops...")
     profile = asyncio.create_task(profiler.print_stats(), name="Profiler")
     # TODO: Revisit why do we need simultaneous loops?
-    tasks = asyncio.create_task(task_loop.start(task_queue, scoring_queue, miners_dict, simultaneous_loops=4))
+    tasks = asyncio.create_task(task_loop.start(task_queue, scoring_queue, miners_dict, simultaneous_loops=2))
     models = asyncio.create_task(model_scheduler.start(scoring_queue, event_restart), name="ModelScheduler")
     scorer = asyncio.create_task(
-        task_scorer.start(model_scheduler, scoring_queue, reward_events, simultaneous_loops=4), name="TaskScorer"
+        task_scorer.start(model_scheduler, scoring_queue, reward_events, simultaneous_loops=2), name="TaskScorer"
     )
     all_tasks = [profile, tasks, models, scorer]

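Review note: this hunk halves `simultaneous_loops` from 4 to 2 for both the task loop and the scorer. As a hypothetical sketch of what a knob like this usually controls (the real `task_loop.start` implementation may differ), each loop spawns that many worker coroutines over a shared queue, so lowering the value reduces contention:

```python
import asyncio


async def _worker(worker_id: int, queue: asyncio.Queue) -> None:
    # Every worker competes for items on the same shared queue.
    while True:
        item = await queue.get()
        print(f"worker {worker_id} handling {item}")
        queue.task_done()


async def start(queue: asyncio.Queue, simultaneous_loops: int = 2) -> None:
    # Hypothetical: simultaneous_loops controls how many identical
    # workers run concurrently; 4 -> 2 means half the concurrency.
    workers = [asyncio.create_task(_worker(i, queue)) for i in range(simultaneous_loops)]
    await asyncio.gather(*workers)
```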
@@ -113,7 +113,7 @@ async def start():
     asyncio.run(start())


-async def start_task_sending_loop(
+def start_task_sending_loop(
     task_queue: list,
     scoring_queue: list,
     miners_dict: dict,
@@ -122,14 +122,15 @@ async def spawn_loops(task_queue, scoring_queue, miners_dict: dict):
         from prompting.tasks.task_sending import task_sender

         logger.info("Starting task sending loop in validator...")
-        asyncio.create_task(task_sender.start(task_queue, scoring_queue, miners_dict, simultaneous_loops=4))
+        asyncio.create_task(task_sender.start(task_queue, scoring_queue, miners_dict, simultaneous_loops=1))
         logger.error("Task sending loop started")
         while True:
             await asyncio.sleep(5)
             logger.debug("Task sending loop is running")

     try:
         logger.info("Starting task sending loop in validator...")
-        await spawn_loops(task_queue, scoring_queue, miners_dict)
+        asyncio.run(spawn_loops(task_queue, scoring_queue, miners_dict))

     except Exception as e:
         logger.exception(f"Task sending loop error: {e}")
@@ -222,8 +223,13 @@ async def main(
     )
     tasks.append(loop_task)

-    sending_task = asyncio.create_task(start_task_sending_loop(task_queue, scoring_queue, miners_dict))
-    tasks.append(sending_task)
+    sending_task = mp.Process(
+        target=start_task_sending_loop,
+        args=(task_queue, scoring_queue, miners_dict),
+        name="SendingTaskProcess",
+    )
+    sending_task.start()
+    processes.append(sending_task)

     weight_setter_process = mp.Process(
         target=start_weight_setter_loop,
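This is the core change in the PR: task sending moves out of the validator's main event loop and into a dedicated OS process, which is also why `start_task_sending_loop` above loses its `async` and wraps its inner loop in `asyncio.run`. A minimal sketch of the pattern with placeholder queue arguments (not the actual subnet wiring):

```python
import asyncio
import multiprocessing as mp


def start_task_sending_loop(task_queue: list, scoring_queue: list, miners_dict: dict) -> None:
    # An mp.Process target runs in a fresh interpreter, so it must be a
    # plain synchronous callable that builds its own event loop.
    async def spawn_loops() -> None:
        while True:
            await asyncio.sleep(5)  # stand-in for the real sending work
            print("task sending loop is running")

    asyncio.run(spawn_loops())


if __name__ == "__main__":
    sending_task = mp.Process(
        target=start_task_sending_loop,
        args=([], [], {}),
        name="SendingTaskProcess",
    )
    sending_task.start()
    sending_task.join(timeout=12)  # let the demo tick a couple of times
    sending_task.terminate()
```

One design consequence worth flagging in review: once the loop lives in a child process, plain Python lists and dicts are no longer shared with the parent, so `task_queue`, `scoring_queue`, and `miners_dict` need to be multiprocessing-aware (e.g. `mp.Manager().list()`) for the two sides to see each other's updates.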
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "prompting"
-version = "2.17.10"
+version = "2.17.11"
 description = "Subnetwork 1 runs on Bittensor and is maintained by Macrocosmos. It's an effort to create decentralised AI"
 authors = ["Kalei Brady, Dmytro Bobrenko, Felix Quinque, Steffen Cruz, Richard Wardle"]
 readme = "README.md"
12 changes: 11 additions & 1 deletion shared/epistula.py
@@ -240,6 +240,8 @@ async def make_openai_query(
     choices = []
     chunks = []
     chunk_timings = []
+    last_finish_reason = None  # Only track the finish reason of the last chunk
+
     async for chunk in chat:
         if not chunk.choices:
             continue
@@ -248,11 +250,19 @@
                 choices.append("")
             if choice.delta.content:
                 choices[i] += choice.delta.content
+            # Save finish reason from the last chunk, safely handling the attribute
+            if hasattr(choice, "finish_reason") and choice.finish_reason is not None:
+                last_finish_reason = choice.finish_reason
         if chunk.choices[0].delta.content:
             chunks.append(chunk.choices[0].delta.content)
             chunk_timings.append(time.perf_counter() - start_time)

     choices = [
-        Choice(index=i, message=ChatCompletionMessage(content=choice, role="assistant"), finish_reason="stop")
+        Choice(
+            index=i,
+            message=ChatCompletionMessage(content=choice, role="assistant"),
+            finish_reason=last_finish_reason or "stop",  # Use the captured finish_reason or fallback to "stop"
+        )
         for i, choice in enumerate(choices)
     ]
     # TODO: We need to find a better way to do this instead of sometimes returning a tuple and sometimes not, but for now this has to do
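For reference, the same last-chunk `finish_reason` capture as a self-contained snippet against the stock `openai` streaming client; the model name and prompt are placeholders, and the `or "stop"` fallback mirrors the change above:

```python
import asyncio

from openai import AsyncOpenAI


async def stream_with_finish_reason() -> tuple[str, str]:
    client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment
    stream = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model
        messages=[{"role": "user", "content": "Say hi"}],
        stream=True,
    )
    text = ""
    last_finish_reason = None  # only the final chunk carries a useful value
    async for chunk in stream:
        if not chunk.choices:
            continue
        choice = chunk.choices[0]
        if choice.delta.content:
            text += choice.delta.content
        if choice.finish_reason is not None:
            last_finish_reason = choice.finish_reason  # e.g. "stop" or "length"
    # Fall back to "stop" if the stream never reported a reason,
    # matching the behavior in make_openai_query.
    return text, last_finish_reason or "stop"


if __name__ == "__main__":
    print(asyncio.run(stream_with_finish_reason()))
```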