
Commit

feat(server): check cuda capability when importing flash models (#201)
close #198
OlivierDehaene committed Apr 19, 2023
1 parent e14ae3b commit a88c54b
Showing 1 changed file with 12 additions and 1 deletion.
13 changes: 12 additions & 1 deletion server/text_generation_server/models/__init__.py
@@ -24,7 +24,18 @@
         FlashSantacoderSharded,
     )
 
-    FLASH_ATTENTION = torch.cuda.is_available()
+    if torch.cuda.is_available():
+        major, minor = torch.cuda.get_device_capability()
+        is_sm75 = major == 7 and minor == 5
+        is_sm8x = major == 8 and minor >= 0
+        is_sm90 = major == 9 and minor == 0
+
+        supported = is_sm75 or is_sm8x or is_sm90
+        if not supported:
+            raise ImportError(f"GPU with CUDA capability {major} {minor} is not supported")
+        FLASH_ATTENTION = True
+    else:
+        FLASH_ATTENTION = False
 except ImportError:
     logger.opt(exception=True).warning(
         "Could not import Flash Attention enabled models"
