4 changes: 2 additions & 2 deletions launcher/src/main.rs

@@ -38,9 +38,9 @@ struct Args {
     port: u16,
     #[clap(default_value = "/tmp/text-generation-server", long, env)]
     shard_uds_path: String,
-    #[clap(default_value = "localhost", long, env)]
+    #[clap(default_value = "0.0.0.0", long, env)]
     master_addr: String,
-    #[clap(default_value = "29500", long, env)]
+    #[clap(default_value = "6000", long, env)]
     master_port: usize,
     #[clap(long, env)]
     json_output: bool,
3 changes: 3 additions & 0 deletions server/text_generation/models/__init__.py

@@ -28,6 +28,9 @@
 # The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
 torch.backends.cudnn.allow_tf32 = True

+# Disable gradients
+torch.set_grad_enabled(False)
+

 def get_model(
     model_id: str, revision: Optional[str], sharded: bool, quantize: bool
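Since the server only ever runs inference, switching autograd off once at import time removes per-op graph bookkeeping everywhere. A standalone sketch of the effect (not part of the diff; note that grad mode is thread-local in PyTorch):

import torch

torch.set_grad_enabled(False)   # grad mode stays off for subsequent ops in this thread

x = torch.ones(2, 2, requires_grad=True)
y = x * 3                       # no autograd graph is recorded
print(torch.is_grad_enabled())  # False
print(y.requires_grad)          # False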
15 changes: 5 additions & 10 deletions server/text_generation/models/causal_lm.py

@@ -289,17 +289,12 @@ def forward(
     def generate_token(
         self, batch: CausalLMBatch
     ) -> Tuple[List[Generation], Optional[CausalLMBatch]]:
-        # For some reason, inference_mode does not work well with GLOO which we use on CPU
-        context_manager = (
-            torch.no_grad if self.device.type == "cpu" else torch.inference_mode
+        logits, past = self.forward(
+            batch.input_ids,
+            batch.attention_mask,
+            batch.position_ids,
+            batch.past_key_values,
         )
-        with context_manager():
-            logits, past = self.forward(
-                batch.input_ids,
-                batch.attention_mask,
-                batch.position_ids,
-                batch.past_key_values,
-            )

         # List of indices to cache
         next_batch_keep_indices = []
19 changes: 7 additions & 12 deletions server/text_generation/models/seq2seq_lm.py

@@ -364,19 +364,14 @@ def forward(
     def generate_token(
         self, batch: Seq2SeqLMBatch
     ) -> Tuple[List[Generation], Optional[Seq2SeqLMBatch]]:
-        # For some reason, inference_mode does not work well with GLOO which we use on CPU
-        context_manager = (
-            torch.no_grad if self.device.type == "cpu" else torch.inference_mode
+        logits, encoder_last_hidden_state, past = self.forward(
+            batch.input_ids,
+            batch.attention_mask,
+            batch.decoder_input_ids,
+            batch.decoder_attention_mask,
+            batch.encoder_last_hidden_state,
+            batch.past_key_values,
         )
-        with context_manager():
-            logits, encoder_last_hidden_state, past = self.forward(
-                batch.input_ids,
-                batch.attention_mask,
-                batch.decoder_input_ids,
-                batch.decoder_attention_mask,
-                batch.encoder_last_hidden_state,
-                batch.past_key_values,
-            )

         # List of indices to cache
         next_batch_keep_indices = []
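Both generate_token implementations previously chose between torch.no_grad and torch.inference_mode per call depending on the device; that branch is gone now that the mode is set once at a higher level (torch.set_grad_enabled(False) in models/__init__.py above, plus the guard in server.py below). For reference, a standalone sketch of how the two modes differ:

import torch

# inference_mode is stricter than no_grad: tensors created under it are
# "inference tensors" that can never participate in autograd later.
with torch.inference_mode():
    t = torch.ones(3)
with torch.no_grad():
    u = torch.ones(3)

print(t.is_inference())  # True  -> skips version counting, cheaper
print(u.is_inference())  # False -> can still feed a grad-enabled graph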
14 changes: 13 additions & 1 deletion server/text_generation/server.py

@@ -1,5 +1,6 @@
 import asyncio
 import os
+import torch

 from grpc import aio
 from loguru import logger

@@ -19,6 +20,10 @@ def __init__(self, model: Model, cache: Cache, server_urls: List[str]):
         self.cache = cache
         self.model = model
         self.server_urls = server_urls
+        # For some reason, inference_mode does not work well with GLOO which we use on CPU
+        if model.device.type == "cuda":
+            # Force inference mode for the lifetime of TextGenerationService
+            self._inference_mode_raii_guard = torch._C._InferenceMode(True)

     async def ServiceDiscovery(self, request, context):
         return generate_pb2.ServiceDiscoveryResponse(urls=self.server_urls)

@@ -89,7 +94,11 @@ async def serve_inner(
         local_url = unix_socket_template.format(uds_path, 0)
         server_urls = [local_url]

-        model = get_model(model_id, revision, sharded, quantize)
+        try:
+            model = get_model(model_id, revision, sharded, quantize)
+        except Exception:
+            logger.exception("Error when initializing model")
+            raise

         server = aio.server(interceptors=[ExceptionInterceptor()])
         generate_pb2_grpc.add_TextGenerationServiceServicer_to_server(

@@ -101,8 +110,11 @@
         )
         reflection.enable_server_reflection(SERVICE_NAMES, server)
         server.add_insecure_port(local_url)
+
         await server.start()
+
         logger.info("Server started at {}".format(local_url))
+
         try:
             await server.wait_for_termination()
         except KeyboardInterrupt:
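torch._C._InferenceMode(True) is a private RAII-style guard: inference mode stays enabled for as long as the guard object is alive, here the lifetime of TextGenerationService, while CPU/GLOO setups rely only on the global set_grad_enabled(False). A rough sketch of the guard's behavior, assuming the private API keeps its current semantics (it may change between PyTorch releases):

import torch

guard = torch._C._InferenceMode(True)     # enter inference mode
print(torch.is_inference_mode_enabled())  # True while the guard is alive

t = torch.ones(2)
print(t.is_inference())                   # True: created under inference mode

del guard  # destroying the guard restores the previous mode (assumed)
print(torch.is_inference_mode_enabled())  # False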
5 changes: 5 additions & 0 deletions server/text_generation/utils.py

@@ -171,9 +171,14 @@ def initialize_torch_distributed():
     else:
         backend = "gloo"

+    master_ip = os.getenv("MASTER_ADDR", "0.0.0.0")
+    master_port = os.getenv("MASTER_PORT", "6000")
+    init_method = f"tcp://{master_ip}:{master_port}"
+
     # Call the init process.
     torch.distributed.init_process_group(
         backend=backend,
+        init_method=init_method,
         world_size=world_size,
         rank=rank,
         timeout=timedelta(seconds=60),
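Passing an explicit init_method pins the rendezvous to MASTER_ADDR/MASTER_PORT, matching the launcher's new 0.0.0.0:6000 defaults, instead of leaving torch.distributed to resolve the address implicitly. A minimal single-process sanity check of the same TCP rendezvous (hypothetical local values; world_size=1 so it completes without peers):

import os
from datetime import timedelta

import torch.distributed

master_ip = os.getenv("MASTER_ADDR", "127.0.0.1")  # hypothetical local override
master_port = os.getenv("MASTER_PORT", "6000")

torch.distributed.init_process_group(
    backend="gloo",
    init_method=f"tcp://{master_ip}:{master_port}",
    world_size=1,
    rank=0,
    timeout=timedelta(seconds=60),
)
print(torch.distributed.get_rank())  # 0
torch.distributed.destroy_process_group()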