Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Release/2.4 #13

Merged
merged 53 commits into from
Nov 19, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
53 commits
Select commit Hold shift + click to select a range
84926fe
async + gpt4turbo
surcyf123 Nov 15, 2023
d1ee6a4
gpt4-turbo
surcyf123 Nov 15, 2023
53e3cc7
updates
surcyf123 Nov 16, 2023
fa9d068
images
surcyf123 Nov 16, 2023
80cacab
images
surcyf123 Nov 16, 2023
c53a931
images
surcyf123 Nov 16, 2023
e229aa7
refactor
surcyf123 Nov 16, 2023
7caa366
refactor
surcyf123 Nov 16, 2023
46aad01
refactor
surcyf123 Nov 16, 2023
fecfe8e
fix
surcyf123 Nov 16, 2023
1973530
remove utils
surcyf123 Nov 16, 2023
931c836
fix
surcyf123 Nov 17, 2023
aaeafe1
fix
surcyf123 Nov 17, 2023
8e2bff6
fix
surcyf123 Nov 17, 2023
219415e
fix
surcyf123 Nov 17, 2023
0280350
bug fixes
surcyf123 Nov 17, 2023
ebc5305
working streaming
surcyf123 Nov 17, 2023
c25cabc
new openai version
surcyf123 Nov 17, 2023
992a107
working streaming
surcyf123 Nov 17, 2023
2844de8
images progress
surcyf123 Nov 17, 2023
4c0a3ce
images progress
surcyf123 Nov 17, 2023
50c8466
image validation
surcyf123 Nov 17, 2023
2c73d4f
image validation
surcyf123 Nov 18, 2023
678115a
async stuff
surcyf123 Nov 18, 2023
5023d2c
async
surcyf123 Nov 18, 2023
53ae8e0
fix
surcyf123 Nov 18, 2023
b029938
scoring images
surcyf123 Nov 18, 2023
41aee60
fix
surcyf123 Nov 18, 2023
37f6c34
fixes
surcyf123 Nov 18, 2023
848ed8b
fix
surcyf123 Nov 18, 2023
1892899
fix
surcyf123 Nov 18, 2023
a76cfe7
fix
surcyf123 Nov 18, 2023
ffd2d0e
fix
surcyf123 Nov 18, 2023
8c8c8fe
assosiated scoring
surcyf123 Nov 18, 2023
68a9970
state management
surcyf123 Nov 18, 2023
e433066
reorg
surcyf123 Nov 18, 2023
a30173d
fix keyboard interrupt
surcyf123 Nov 18, 2023
7831d2b
final touches to images
surcyf123 Nov 18, 2023
cdac482
fix
surcyf123 Nov 18, 2023
4b9fa59
fix set_weights
surcyf123 Nov 18, 2023
c46fc40
adding wandb
surcyf123 Nov 19, 2023
e6e5183
fix
surcyf123 Nov 19, 2023
5e21254
update reqs
surcyf123 Nov 19, 2023
f06a585
reorg
surcyf123 Nov 19, 2023
cb10259
remove dir
surcyf123 Nov 19, 2023
268f810
remove image dir
surcyf123 Nov 19, 2023
14f3f07
images
surcyf123 Nov 19, 2023
2bc76d4
fix
surcyf123 Nov 19, 2023
fc3cf88
remove images
surcyf123 Nov 19, 2023
52af283
async lock for get_quesiton
surcyf123 Nov 19, 2023
4f6fd22
fix recursive async lock
surcyf123 Nov 19, 2023
230d6e9
fix themes bug
surcyf123 Nov 19, 2023
de4a545
added traceback import
surcyf123 Nov 19, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
alias.sh
cloudflared.deb
go1.21.0.linux-amd64.tar.gz

/neurons/state.json
Binary file added neurons/__pycache__/config.cpython-39.pyc
Binary file not shown.
56 changes: 0 additions & 56 deletions neurons/endpoint.py

This file was deleted.

86 changes: 71 additions & 15 deletions neurons/miner.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,28 +4,33 @@
import argparse
import threading
import traceback
import os
from abc import ABC, abstractmethod
from functools import partial
from starlette.types import Send
import openai
from openai import OpenAI
from openai import AsyncOpenAI
import bittensor as bt
from transformers import GPT2Tokenizer
from typing import List, Dict, Tuple, Union, Callable, Awaitable

from template.protocol import StreamPrompting, IsAlive
from template.protocol import StreamPrompting, IsAlive, ImageResponse
from config import get_config, check_config

OpenAI.api_key = os.environ.get('OPENAI_API_KEY')
if not OpenAI.api_key:
raise ValueError("Please set the OPENAI_API_KEY environment variable.")

client = AsyncOpenAI(timeout=30.0)


class StreamMiner(ABC):
def __init__(self, config=None, axon=None, wallet=None, subtensor=None):
# Setup base config from Miner.config() and merge with subclassed config.
bt.logging.info("starting stream miner")
base_config = copy.deepcopy(config or get_config())
self.config = self.config()
self.config.merge(base_config)

check_config(StreamMiner, self.config)
bt.logging.info(self.config) # TODO: duplicate print?

self.prompt_cache: Dict[str, Tuple[str, int]] = {}

# Activating Bittensor's logging with the set configurations.
Expand Down Expand Up @@ -68,6 +73,8 @@ def __init__(self, config=None, axon=None, wallet=None, subtensor=None):
forward_fn=self._prompt,
).attach(
forward_fn=self.is_alive,
).attach(
forward_fn=self._images,
)
bt.logging.info(f"Axon created: {self.axon}")

Expand All @@ -90,6 +97,9 @@ def add_args(cls, parser: argparse.ArgumentParser):
# Axon forward handler for StreamPrompting requests; thin wrapper that
# delegates to the subclass-implemented prompt().
def _prompt(self, synapse: StreamPrompting) -> StreamPrompting:
return self.prompt(synapse)

# Axon forward handler for ImageResponse requests; awaits the
# subclass-implemented images() coroutine and returns its synapse.
async def _images(self, synapse: ImageResponse) -> ImageResponse:
return await self.images(synapse)

def is_alive(self, synapse: IsAlive) -> IsAlive:
bt.logging.info("answered to be active")
synapse.completion = "True"
Expand All @@ -99,14 +109,18 @@ def is_alive(self, synapse: IsAlive) -> IsAlive:
def prompt(self, synapse: StreamPrompting) -> StreamPrompting:
...

# Subclasses must implement image generation for ImageResponse synapses.
# NOTE(review): the _images axon wrapper awaits this call, so concrete
# implementations should be `async def` even though the abstract
# signature here is declared sync — confirm.
@abstractmethod
def images(self, synapse: ImageResponse) -> ImageResponse:
...

def run(self):
if not self.subtensor.is_hotkey_registered(
netuid=self.config.netuid,
hotkey_ss58=self.wallet.hotkey.ss58_address,
):
bt.logging.error(
f"Wallet: {self.wallet} is not registered on netuid {self.config.netuid}"
f"Please register the hotkey using `btcli subnets register` before trying again"
f"Please register the hotkey using `btcli s register --netuid 18` before trying again"
)
exit()
bt.logging.info(
Expand All @@ -132,7 +146,6 @@ def run(self):
# --- Wait for next bloc.
time.sleep(1)
current_block = self.subtensor.get_current_block()

# --- Check if we should exit.
if self.should_exit:
break
Expand Down Expand Up @@ -203,25 +216,67 @@ def config(self) -> "bt.Config":
def add_args(cls, parser: argparse.ArgumentParser):
pass

async def images(self, synapse: ImageResponse) -> ImageResponse:
    """Generate an image with the async OpenAI Images API.

    Reads engine, messages, size, quality and style from the incoming
    synapse, calls ``client.images.generate`` and stores a dict of
    ``{"created_at", "url", "revised_prompt"}`` in ``synapse.completion``.

    Returns:
        The same synapse object. On failure the error is logged and the
        synapse is returned unmodified — previously the except path fell
        through and returned ``None``, leaving the axon with no response.
    """
    bt.logging.info(f"called image axon {synapse}")
    try:
        # Request parameters come straight from the caller's synapse.
        engine = synapse.engine
        # NOTE(review): `messages` is passed as the image prompt string —
        # confirm the field actually carries a plain prompt here.
        messages = synapse.messages
        size = synapse.size
        quality = synapse.quality
        style = synapse.style

        # Await the response from the asynchronous OpenAI client.
        meta = await client.images.generate(
            model=engine,
            prompt=messages,
            size=size,
            quality=quality,
            style=style,
        )

        # Only the first generated image is used (n defaults to 1).
        image_data = {
            "created_at": meta.created,
            "url": meta.data[0].url,
            "revised_prompt": meta.data[0].revised_prompt,
        }

        synapse.completion = image_data
        bt.logging.info(f"returning image response of {synapse.completion}")

    except Exception as e:
        bt.logging.error(f"error in images: {e}\n{traceback.format_exc()}")

    # Always hand the synapse back so the axon has a response object,
    # even when generation failed.
    return synapse



def prompt(self, synapse: StreamPrompting) -> StreamPrompting:
bt.logging.info(f"starting processing for synapse {synapse}")

async def _prompt(synapse, send: Send):
try:
engine = synapse.engine
messages = synapse.messages
bt.logging.info(f"question is {messages} with engine {engine}")
response = openai.ChatCompletion.create(
seed=synapse.seed
bt.logging.info(synapse)
bt.logging.info(f"question is {messages} with engine {engine}, seed: {seed}")
response = await client.chat.completions.create(
model= engine,
messages= messages,
temperature= 0,
stream= True
temperature= 0.0001,
stream= True,
seed=seed,
)
buffer = []
N=1
for chunk in response:
try: token = str(chunk['choices'][0]['delta']['content'])
except: continue
async for chunk in response:
token = chunk.choices[0].delta.content or ""
buffer.append(token)
if len(buffer) == N:
joined_buffer = "".join(buffer)
Expand All @@ -245,6 +300,7 @@ async def _prompt(synapse, send: Send):
}
)
bt.logging.info(f"Streamed tokens: {joined_buffer}")
print(f"response is {response}")
except Exception as e:
bt.logging.error(f"error in _prompt {e}\n{traceback.format_exc()}")

Expand Down
53 changes: 0 additions & 53 deletions neurons/new.py

This file was deleted.

18 changes: 18 additions & 0 deletions neurons/test_images.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
"""Manual smoke test: generate one DALL-E 3 image and print its metadata.

The OpenAI client reads OPENAI_API_KEY from the environment implicitly.
Makes a single billable API call — run by hand only.
"""
from openai import OpenAI

# Generate image with DALL-E 3.  (The previous unused `import requests`
# has been removed — nothing in this script performs raw HTTP.)
client = OpenAI()
response = client.images.generate(
    model="dall-e-3",
    prompt="a super cool image",
    size="1024x1024",
    quality="standard",  # or "hd" (double the cost)
    style="vivid",  # or "natural"
    n=1,  # exactly one image; only data[0] is read below
)

image_url = response.data[0].url
image_revised_prompt = response.data[0].revised_prompt
image_created = response.created
print(f"created at {image_created}\n\nrevised_prompt = {image_revised_prompt}\n\nurl = {image_url}")
52 changes: 29 additions & 23 deletions neurons/test_openai.py
Original file line number Diff line number Diff line change
@@ -1,38 +1,44 @@
"""Manual smoke test for async, streamed OpenAI chat completions.

This span in SOURCE is a unified diff with unmarked deleted/added lines
interleaved; below is the reconstructed post-change version of the file.
Fires several prompts concurrently via asyncio.gather and prints streamed
tokens as they arrive.  Requires OPENAI_API_KEY; makes real billable API
calls — run by hand only.
"""
import asyncio
import os
import traceback

from openai import AsyncOpenAI, OpenAI

OpenAI.api_key = os.environ.get('OPENAI_API_KEY')
if not OpenAI.api_key:
    raise ValueError("Please set the OPENAI_API_KEY environment variable.")

# Single shared async client; the 30s timeout guards against hung streams.
client = AsyncOpenAI(timeout=30)


async def send_openai_request(prompt, engine="gpt-4-1106-preview"):
    """Stream one chat completion and return the concatenated text.

    On any failure the traceback is printed and the sentinel string
    "Error calling model" is returned instead of raising.
    """
    try:
        stream = await client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            stream=True,
            model=engine,
            seed=1234,  # fixed seed for more reproducible outputs
            temperature=0.0001,
        )

        collected_messages = []
        async for part in stream:
            # delta.content is None on role/stop chunks — coerce to "".
            print(part.choices[0].delta.content or "")
            collected_messages.append(part.choices[0].delta.content or "")

        all_messages = ''.join(collected_messages)
        return all_messages

    except Exception as e:
        print(f"Got exception when calling openai {e}")
        traceback.print_exc()
        return "Error calling model"


async def main():
    # Run both prompts concurrently; gather preserves input order.
    prompts = ["count to 10", "tell me a joke"]
    tasks = [send_openai_request(prompt) for prompt in prompts]

    responses = await asyncio.gather(*tasks)
    for response in responses:
        print(response)


asyncio.run(main())
Loading