-
Notifications
You must be signed in to change notification settings - Fork 1.2k
/
gen.py
2841 lines (2616 loc) · 142 KB
/
gen.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import ast
import copy
import functools
import glob
import inspect
import queue
import sys
import os
import time
import traceback
import typing
import warnings
from datetime import datetime

import filelock
import requests
import psutil
from requests import ConnectTimeout, JSONDecodeError
from urllib3.exceptions import ConnectTimeoutError, MaxRetryError, ConnectionError
from requests.exceptions import ConnectionError as ConnectionError2
from requests.exceptions import ReadTimeout as ReadTimeout2

# Make this file's directory importable so sibling project modules
# (evaluate_params, enums, loaders, utils, prompter, stopping) resolve
# regardless of the caller's working directory.
if os.path.dirname(os.path.abspath(__file__)) not in sys.path:
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# NOTE: these env vars must be set BEFORE any HuggingFace/bitsandbytes-backed
# module is imported below, or they have no effect (telemetry off, no banner).
os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1'
os.environ['BITSANDBYTES_NOWELCOME'] = '1'
warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')

from evaluate_params import eval_func_param_names, no_default_param_names
from enums import DocumentSubset, LangChainMode, no_lora_str, model_token_mapping, no_model_str, source_prefix, \
    source_postfix, LangChainAction, LangChainAgent, DocumentChoice
from loaders import get_loaders
from utils import set_seed, clear_torch_cache, save_generate_output, NullContext, wrapped_partial, EThread, get_githash, \
    import_matplotlib, get_device, makedirs, get_kwargs, start_faulthandler, get_hf_server, FakeTokenizer, remove, \
    have_langchain, set_openai, load_collection_enum, cuda_vis_check

start_faulthandler()  # dump Python tracebacks on hard faults/hangs to aid debugging
import_matplotlib()  # choose matplotlib backend before anything else imports pyplot

# Fixed default seed for reproducible generation across runs.
SEED = 1236
set_seed(SEED)

from typing import Union

import fire
import torch
from transformers import GenerationConfig, AutoModel, TextIteratorStreamer

from prompter import Prompter, inv_prompt_type_to_model_lower, non_hf_types, PromptType, get_prompt, generate_prompt
from stopping import get_stopping

# All supported LangChain action/agent values, used for CLI validation and UI choices.
langchain_actions = [x.value for x in list(LangChainAction)]
langchain_agents_list = [x.value for x in list(LangChainAgent)]

# Base directory for per-session scratch databases/files (e.g. MyData collections).
scratch_base_dir = '/tmp/'
def main(
load_8bit: bool = False,
load_4bit: bool = False,
load_half: bool = True,
load_gptq: str = '',
load_exllama: bool = False,
use_safetensors: bool = False,
revision: str = None,
use_gpu_id: bool = True,
base_model: str = '',
tokenizer_base_model: str = '',
lora_weights: str = "",
gpu_id: int = 0,
compile_model: bool = True,
use_cache: bool = None,
inference_server: str = "",
prompt_type: Union[int, str] = None,
prompt_dict: typing.Dict = None,
model_lock: typing.List[typing.Dict[str, str]] = None,
model_lock_columns: int = None,
fail_if_cannot_connect: bool = False,
# input to generation
temperature: float = None,
top_p: float = None,
top_k: int = None,
num_beams: int = None,
repetition_penalty: float = None,
num_return_sequences: int = None,
do_sample: bool = None,
max_new_tokens: int = None,
min_new_tokens: int = None,
early_stopping: Union[bool, str] = None,
max_time: float = None,
memory_restriction_level: int = None,
debug: bool = False,
save_dir: str = None,
share: bool = False,
local_files_only: bool = False,
resume_download: bool = True,
use_auth_token: Union[str, bool] = False,
trust_remote_code: Union[str, bool] = True,
rope_scaling: dict = None,
max_seq_len: int = None,
offload_folder: str = "offline_folder",
src_lang: str = "English",
tgt_lang: str = "Russian",
cli: bool = False,
cli_loop: bool = True,
gradio: bool = True,
gradio_offline_level: int = 0,
root_path: str = "",
chat: bool = True,
chat_context: bool = False,
stream_output: bool = True,
async_output: bool = True,
num_async: int = 3,
show_examples: bool = None,
verbose: bool = False,
h2ocolors: bool = True,
dark: bool = False, # light tends to be best
height: int = 600,
show_lora: bool = True,
login_mode_if_model0: bool = False,
block_gradio_exit: bool = True,
concurrency_count: int = 1,
api_open: bool = False,
allow_api: bool = True,
input_lines: int = 1,
gradio_size: str = None,
auth: typing.List[typing.Tuple[str, str]] = None,
max_max_time=None,
max_max_new_tokens=None,
sanitize_user_prompt: bool = False,
sanitize_bot_response: bool = False,
extra_model_options: typing.List[str] = [],
extra_lora_options: typing.List[str] = [],
extra_server_options: typing.List[str] = [],
score_model: str = 'auto',
eval_filename: str = None,
eval_prompts_only_num: int = 0,
eval_prompts_only_seed: int = 1234,
eval_as_output: bool = False,
langchain_mode: str = None,
langchain_action: str = LangChainAction.QUERY.value,
langchain_agents: list = [],
force_langchain_evaluate: bool = False,
langchain_modes: list = [x.value for x in list(LangChainMode)],
visible_langchain_modes: list = ['UserData', 'MyData'],
# WIP:
# visible_langchain_actions: list = langchain_actions.copy(),
visible_langchain_actions: list = [LangChainAction.QUERY.value, LangChainAction.SUMMARIZE_MAP.value],
visible_langchain_agents: list = langchain_agents_list.copy(),
document_subset: str = DocumentSubset.Relevant.name,
document_choice: list = [DocumentChoice.ALL.value],
user_path: str = None,
langchain_mode_paths: dict = {'UserData': None},
detect_user_path_changes_every_query: bool = False,
use_llm_if_no_docs: bool = True,
load_db_if_exists: bool = True,
keep_sources_in_context: bool = False,
use_system_prompt: bool = False,
db_type: str = 'chroma',
use_openai_embedding: bool = False,
use_openai_model: bool = False,
hf_embedding_model: str = None,
cut_distance: float = 1.64,
add_chat_history_to_context: bool = True,
allow_upload_to_user_data: bool = True,
reload_langchain_state: bool = True,
allow_upload_to_my_data: bool = True,
enable_url_upload: bool = True,
enable_text_upload: bool = True,
enable_sources_list: bool = True,
chunk: bool = True,
chunk_size: int = 512,
top_k_docs: int = None,
reverse_docs: bool = True,
auto_reduce_chunks: bool = True,
max_chunks: int = 100,
n_jobs: int = -1,
enable_captions: bool = True,
captions_model: str = "Salesforce/blip-image-captioning-base",
pre_load_caption_model: bool = False,
caption_gpu: bool = True,
enable_ocr: bool = False,
enable_pdf_ocr: str = 'auto',
):
"""
:param load_8bit: load model in 8-bit using bitsandbytes
:param load_4bit: load model in 4-bit using bitsandbytes
:param load_half: load model in float16
:param load_gptq: to load model with GPTQ, put model_basename here, e.g. gptq_model-4bit--1g
:param load_exllama: whether to use exllama (only applicable to LLaMa1/2 models with 16-bit or GPTQ
:param use_safetensors: to use safetensors version (assumes file/HF points to safe tensors version)
:param revision: Which HF revision to use
:param use_gpu_id: whether to control devices with gpu_id. If False, then spread across GPUs
:param base_model: model HF-type name. If use --base_model to preload model, cannot unload in gradio in models tab
:param tokenizer_base_model: tokenizer HF-type name. Usually not required, inferred from base_model.
:param lora_weights: LORA weights path/HF link
:param gpu_id: if use_gpu_id, then use gpu_id for cuda device ID, or auto mode if gpu_id != -1
:param compile_model Whether to compile the model
:param use_cache: Whether to use caching in model (some models fail when multiple threads use)
:param inference_server: Consume base_model as type of model at this address
Address can be text-generation-server hosting that base_model
e.g. python generate.py --inference_server="http://192.168.1.46:6112" --base_model=h2oai/h2ogpt-oasst1-512-12b
Or Address can be "openai_chat" or "openai" for OpenAI API
e.g. python generate.py --inference_server="openai_chat" --base_model=gpt-3.5-turbo
e.g. python generate.py --inference_server="openai" --base_model=text-davinci-003
Or Address can be "vllm:IP:port" or "vllm:IP:port" for OpenAI-compliant vLLM endpoint
Note: vllm_chat not supported by vLLM project.
:param prompt_type: type of prompt, usually matched to fine-tuned model or plain for foundational model
:param prompt_dict: If prompt_type=custom, then expects (some) items returned by get_prompt(..., return_dict=True)
:param model_lock: Lock models to specific combinations, for ease of use and extending to many models
Only used if gradio = True
List of dicts, each dict has base_model, tokenizer_base_model, lora_weights, inference_server, prompt_type, and prompt_dict
If all models have same prompt_type, and prompt_dict, can still specify that once in CLI outside model_lock as default for dict
Can specify model_lock instead of those items on CLI
As with CLI itself, base_model can infer prompt_type and prompt_dict if in prompter.py.
Also, tokenizer_base_model and lora_weights are optional.
Also, inference_server is optional if loading model from local system.
All models provided will automatically appear in compare model mode
Model loading-unloading and related choices will be disabled. Model/lora/server adding will be disabled
:param model_lock_columns: How many columns to show if locking models (and so showing all at once)
If None, then defaults to up to 3
if -1, then all goes into 1 row
Maximum value is 4 due to non-dynamic gradio rendering elements
:param fail_if_cannot_connect: if doing model locking (e.g. with many models), fail if True. Otherwise ignore.
Useful when many endpoints and want to just see what works, but still have to wait for timeout.
:param temperature: generation temperature
:param top_p: generation top_p
:param top_k: generation top_k
:param num_beams: generation number of beams
:param repetition_penalty: generation repetition penalty
:param num_return_sequences: generation number of sequences (1 forced for chat)
:param do_sample: generation sample
:param max_new_tokens: generation max new tokens
:param min_new_tokens: generation min tokens
:param early_stopping: generation early stopping
:param max_time: maximum time to allow for generation
:param memory_restriction_level: 0 = no restriction to tokens or model, 1 = some restrictions on token 2 = HF like restriction 3 = very low memory case
:param debug: enable debug mode
:param save_dir: directory chat data is saved to
:param share: whether to share the gradio app with sharable URL
:param local_files_only: whether to only use local files instead of doing to HF for models
:param resume_download: whether to resume downloads from HF for models
:param use_auth_token: whether to use HF auth token (requires CLI did huggingface-cli login before)
:param trust_remote_code: whether to use trust any code needed for HF model
:param rope_scaling:
For HF transformers model: scaling for rope-based models, e.g. --rope_scaling="{'type':'dynamic', 'factor':4}"
For exllama model: --rope_scaling="{'alpha_value':4}" . This automatically scales max_seq_len for exllama
:param offload_folder: path for spilling model onto disk
:param src_lang: source languages to include if doing translation (None = all)
:param tgt_lang: target languages to include if doing translation (None = all)
:param cli: whether to use CLI (non-gradio) interface.
:param cli_loop: whether to loop for CLI (False usually only for testing)
:param gradio: whether to enable gradio, or to enable benchmark mode
:param gradio_offline_level: > 0, then change fonts so full offline
== 1 means backend won't need internet for fonts, but front-end UI might if font not cached
== 2 means backend and frontend don't need internet to download any fonts.
Note: Some things always disabled include HF telemetry, gradio telemetry, chromadb posthog that involve uploading.
This option further disables google fonts for downloading, which is less intrusive than uploading,
but still required in air-gapped case. The fonts don't look as nice as google fonts, but ensure full offline behavior.
Also set --share=False to avoid sharing a gradio live link.
:param root_path: The root path (or "mount point") of the application,
if it's not served from the root ("/") of the domain. Often used when the application is behind a reverse proxy
that forwards requests to the application. For example, if the application is served at "https://example.com/myapp",
the `root_path` should be set to "/myapp".
:param chat: whether to enable chat mode with chat history
:param chat_context: whether to use extra helpful context if human_bot
:param stream_output: whether to stream output
:param async_output: Whether to do asyncio handling
For summarization
Applicable to HF TGI server
Only if stream_output=False in CLI, UI, or API
:param num_async: Number of simultaneously allowed asyncio calls to make for async_output
Too many will overload inference server, too few will be too slow
:param show_examples: whether to show clickable examples in gradio
:param verbose: whether to show verbose prints
:param h2ocolors: whether to use H2O.ai theme
:param dark: whether to use dark mode for UI by default (still controlled in UI)
:param height: height of chat window
:param show_lora: whether to show LORA options in UI (expert so can be hard to understand)
:param login_mode_if_model0: set to True to load --base_model after client logs in, to be able to free GPU memory when model is swapped
:param block_gradio_exit: whether to block gradio exit (used for testing)
:param concurrency_count: gradio concurrency count (1 is optimal for LLMs)
:param api_open: If False, don't let API calls skip gradio queue
:param allow_api: whether to allow API calls at all to gradio server
:param input_lines: how many input lines to show for chat box (>1 forces shift-enter for submit, else enter is submit)
:param gradio_size: Overall size of text and spaces: "xsmall", "small", "medium", "large".
Small useful for many chatbots in model_lock mode
:param auth: gradio auth for launcher in form [(user1, pass1), (user2, pass2), ...]
e.g. --auth=[('jon','password')] with no spaces
:param max_max_time: Maximum max_time for gradio slider
:param max_max_new_tokens: Maximum max_new_tokens for gradio slider
:param sanitize_user_prompt: whether to remove profanity from user input (slows down input processing)
Requires optional packages:
pip install alt-profanity-check==1.2.2 better-profanity==0.7.0
:param sanitize_bot_response: whether to remove profanity and repeat lines from bot output (about 2x slower generation for long streaming cases due to better_profanity being slow)
:param extra_model_options: extra models to show in list in gradio
:param extra_lora_options: extra LORA to show in list in gradio
:param extra_server_options: extra servers to show in list in gradio
:param score_model: which model to score responses
None: no response scoring
'auto': auto mode, '' (no model) for CPU or 1 GPU, 'OpenAssistant/reward-model-deberta-v3-large-v2' for >=2 GPUs,
because on CPU takes too much compute just for scoring response
:param eval_filename: json file to use for evaluation, if None is sharegpt
:param eval_prompts_only_num: for no gradio benchmark, if using eval_filename prompts for eval instead of examples
:param eval_prompts_only_seed: for no gradio benchmark, seed for eval_filename sampling
:param eval_as_output: for no gradio benchmark, whether to test eval_filename output itself
:param langchain_mode: Data source to include. Choose "UserData" to only consume files from make_db.py.
None: auto mode, check if langchain package exists, at least do LLM if so, else Disabled
WARNING: wiki_full requires extra data processing via read_wiki_full.py and requires really good workstation to generate db, unless already present.
:param langchain_action: Mode langchain operations in on documents.
Query: Make query of document(s)
Summarize or Summarize_map_reduce: Summarize document(s) via map_reduce
Summarize_all: Summarize document(s) using entire document at once
Summarize_refine: Summarize document(s) using entire document, and try to refine before returning summary
:param langchain_agents: Which agents to use
'search': Use Web Search as context for LLM response, e.g. SERP if have SERPAPI_API_KEY in env
:param force_langchain_evaluate: Whether to force langchain LLM use even if not doing langchain, mostly for testing.
:param user_path: user path to glob from to generate db for vector search, for 'UserData' langchain mode.
If already have db, any new/changed files are added automatically if path set, does not have to be same path used for prior db sources
:param langchain_mode_paths: dict of langchain_mode keys and disk path values to use for source of documents
E.g. "{'UserData2': 'userpath2'}"
Can be None even if existing DB, to avoid new documents being added from that path, source links that are on disk still work.
If user_path is not None, that path is used for 'UserData' instead of the value in this dict
:param detect_user_path_changes_every_query: whether to detect if any files changed or added every similarity search (by file hashes).
Expensive for large number of files, so not done by default. By default only detect changes during db loading.
:param langchain_modes: names of collections/dbs to potentially have
:param visible_langchain_modes: dbs to generate at launch to be ready for LLM
Can be up to ['wiki', 'wiki_full', 'UserData', 'MyData', 'github h2oGPT', 'DriverlessAI docs']
But wiki_full is expensive and requires preparation
To allow scratch space only live in session, add 'MyData' to list
Default: If only want to consume local files, e.g. prepared by make_db.py, only include ['UserData']
If have own user modes, need to add these here or add in UI.
A state file is stored in visible_langchain_modes.pkl containing last UI-selected values of:
langchain_modes, visible_langchain_modes, and langchain_mode_paths
Delete the file if you want to start fresh,
but in any case the user_path passed in CLI is used for UserData even if was None or different
:param visible_langchain_actions: Which actions to allow
:param visible_langchain_agents: Which agents to allow
:param document_subset: Default document choice when taking subset of collection
:param document_choice: Chosen document(s) by internal name, 'All' means use all docs
:param use_llm_if_no_docs: Whether to use LLM even if no documents, when langchain_mode=UserData or MyData or custom
:param load_db_if_exists: Whether to load chroma db if exists or re-generate db
:param keep_sources_in_context: Whether to keep url sources in context, not helpful usually
:param use_system_prompt: Whether to use system prompt (e.g. llama2 safe system prompt)
:param db_type: 'faiss' for in-memory or 'chroma' or 'weaviate' for persisted on disk
:param use_openai_embedding: Whether to use OpenAI embeddings for vector db
:param use_openai_model: Whether to use OpenAI model for use with vector db
:param hf_embedding_model: Which HF embedding model to use for vector db
Default is instructor-large with 768 parameters per embedding if have GPUs, else all-MiniLM-L6-v2 if no GPUs
Can also choose simpler model with 384 parameters per embedding: "sentence-transformers/all-MiniLM-L6-v2"
Can also choose even better embedding with 1024 parameters: 'hkunlp/instructor-xl'
We support automatically changing of embeddings for chroma, with a backup of db made if this is done
:param cut_distance: Distance to cut off references with larger distances when showing references.
1.64 is good to avoid dropping references for all-MiniLM-L6-v2, but instructor-large will always show excessive references.
For all-MiniLM-L6-v2, a value of 1.5 can push out even more references, or a large value of 100 can avoid any loss of references.
:param add_chat_history_to_context: Include chat context when performing action
Not supported yet for openai_chat when using document collection instead of LLM
Also not supported when using CLI mode
:param allow_upload_to_user_data: Whether to allow file uploads to update shared vector db (UserData or custom user dbs)
:param reload_langchain_state: Whether to reload visible_langchain_modes.pkl file that contains any new user collections.
:param allow_upload_to_my_data: Whether to allow file uploads to update scratch vector db
:param enable_url_upload: Whether to allow upload from URL
:param enable_text_upload: Whether to allow upload of text
:param enable_sources_list: Whether to allow list (or download for non-shared db) of list of sources for chosen db
:param chunk: Whether to chunk data (True unless know data is already optimally chunked)
:param chunk_size: Size of chunks, with typically top-4 passed to LLM, so needs to be in context length
:param top_k_docs: number of chunks to give LLM
:param reverse_docs: whether to reverse docs order so most relevant is closest to question.
Best choice for sufficiently smart model, and truncation occurs for oldest context, so best then too.
But smaller 6_9 models fail to use newest context and can get stuck on old information.
:param auto_reduce_chunks: Whether to automatically reduce top_k_docs to fit context given prompt
:param max_chunks: If top_k_docs=-1, maximum number of chunks to allow
:param n_jobs: Number of processors to use when consuming documents (-1 = all, is default)
:param enable_captions: Whether to support captions using BLIP for image files as documents, then preloads that model
:param captions_model: Which model to use for captions.
captions_model: str = "Salesforce/blip-image-captioning-base", # continue capable
captions_model: str = "Salesforce/blip2-flan-t5-xl", # question/answer capable, 16GB state
captions_model: str = "Salesforce/blip2-flan-t5-xxl", # question/answer capable, 60GB state
Note: opt-based blip2 are not permissive license due to opt and Meta license restrictions
Disabled for CPU since BLIP requires CUDA
:param pre_load_caption_model: Whether to preload caption model, or load after forking parallel doc loader
parallel loading disabled if preload and have images, to prevent deadlocking on cuda context
Recommended if using larger caption model
:param caption_gpu: If support caption, then use GPU if exists
:param enable_ocr: Whether to support OCR on images
:param enable_pdf_ocr: 'auto' means only use OCR if normal text extraction fails. Useful for pure image-based PDFs with text
'on' means always do OCR as additional parsing of same documents
'off' means don't do OCR (e.g. because it's slow even if 'auto' only would trigger if nothing else worked)
:return:
"""
if base_model is None:
base_model = ''
if tokenizer_base_model is None:
tokenizer_base_model = ''
if lora_weights is None:
lora_weights = ''
if inference_server is None:
inference_server = ''
# listen to env if set
model_lock = os.getenv('model_lock', str(model_lock))
model_lock = ast.literal_eval(model_lock)
if model_lock:
assert gradio, "model_lock only supported for gradio=True"
if len(model_lock) > 1:
assert chat, "model_lock only works for multiple models for chat=True"
assert not cli, "model_lock only supported for cli=False"
assert not (not cli and not gradio), "model_lock only supported for eval (cli=gradio=False)"
assert not base_model, "Don't specify model_lock and base_model"
assert not tokenizer_base_model, "Don't specify model_lock and tokenizer_base_model"
assert not lora_weights, "Don't specify model_lock and lora_weights"
assert not inference_server, "Don't specify model_lock and inference_server"
# assert not prompt_type, "Don't specify model_lock and prompt_type"
# assert not prompt_dict, "Don't specify model_lock and prompt_dict"
n_jobs = int(os.getenv('n_jobs', str(n_jobs)))
is_hf = bool(int(os.getenv("HUGGINGFACE_SPACES", '0')))
is_gpth2oai = bool(int(os.getenv("GPT_H2O_AI", '0')))
is_public = is_hf or is_gpth2oai # multi-user case with fixed model and disclaimer
if memory_restriction_level is None:
memory_restriction_level = 2 if is_hf else 0 # 2 assumes run on 24GB consumer GPU
else:
assert 0 <= memory_restriction_level <= 3, "Bad memory_restriction_level=%s" % memory_restriction_level
if is_public and os.getenv('n_jobs') is None:
n_jobs = max(1, min(os.cpu_count() // 2, 8))
admin_pass = os.getenv("ADMIN_PASS")
# will sometimes appear in UI or sometimes actual generation, but maybe better than empty result
# but becomes unrecoverable sometimes if raise, so just be silent for now
raise_generate_gpu_exceptions = True
if isinstance(rope_scaling, str):
rope_scaling = ast.literal_eval(rope_scaling)
# allow set token directly
use_auth_token = os.environ.get("HUGGINGFACE_API_TOKEN", use_auth_token)
allow_upload_to_user_data = bool(
int(os.environ.get("allow_upload_to_user_data", str(int(allow_upload_to_user_data)))))
allow_upload_to_my_data = bool(int(os.environ.get("allow_upload_to_my_data", str(int(allow_upload_to_my_data)))))
height = int(os.environ.get("HEIGHT", height))
h2ocolors = bool(int(os.getenv('h2ocolors', h2ocolors)))
# allow enabling langchain via ENV
# FIRST PLACE where LangChain referenced, but no imports related to it
langchain_mode = os.environ.get("LANGCHAIN_MODE", langchain_mode)
if langchain_mode is not None:
assert langchain_mode in langchain_modes, "Invalid langchain_mode %s" % langchain_mode
visible_langchain_modes = ast.literal_eval(os.environ.get("visible_langchain_modes", str(visible_langchain_modes)))
if langchain_mode not in visible_langchain_modes and langchain_mode in langchain_modes:
if langchain_mode is not None:
visible_langchain_modes += [langchain_mode]
# update
if isinstance(langchain_mode_paths, str):
langchain_mode_paths = ast.literal_eval(langchain_mode_paths)
assert isinstance(langchain_mode_paths, dict)
if user_path:
langchain_mode_paths['UserData'] = user_path
makedirs(user_path)
if is_public:
allow_upload_to_user_data = False
if LangChainMode.USER_DATA.value in visible_langchain_modes:
visible_langchain_modes.remove(LangChainMode.USER_DATA.value)
# in-place, for non-scratch dbs
if allow_upload_to_user_data:
update_langchain(langchain_modes, visible_langchain_modes, langchain_mode_paths, '')
# always listen to CLI-passed user_path if passed
if user_path:
langchain_mode_paths['UserData'] = user_path
assert langchain_action in langchain_actions, "Invalid langchain_action %s" % langchain_action
assert len(
set(langchain_agents).difference(langchain_agents_list)) == 0, "Invalid langchain_agents %s" % langchain_agents
# if specifically chose not to show My or User Data, disable upload, so gradio elements are simpler
if LangChainMode.MY_DATA.value not in visible_langchain_modes:
allow_upload_to_my_data = False
if LangChainMode.USER_DATA.value not in visible_langchain_modes:
allow_upload_to_user_data = False
# auto-set langchain_mode
if have_langchain and langchain_mode is None:
# start in chat mode, in case just want to chat and don't want to get "No documents to query" by default.
langchain_mode = LangChainMode.LLM.value
if allow_upload_to_user_data and not is_public and langchain_mode_paths['UserData']:
print("Auto set langchain_mode=%s. Could use UserData instead." % langchain_mode, flush=True)
elif allow_upload_to_my_data:
print("Auto set langchain_mode=%s. Could use MyData instead."
" To allow UserData to pull files from disk,"
" set user_path or langchain_mode_paths, and ensure allow_upload_to_user_data=True" % langchain_mode,
flush=True)
else:
raise RuntimeError("Please pass --langchain_mode=<chosen mode> out of %s" % langchain_modes)
if not have_langchain and langchain_mode not in [None, LangChainMode.DISABLED.value, LangChainMode.LLM.value]:
raise RuntimeError("Asked for LangChain mode but langchain python package cannot be found.")
if langchain_mode is None:
# if not set yet, disable
langchain_mode = LangChainMode.DISABLED.value
print("Auto set langchain_mode=%s Have langchain package: %s" % (langchain_mode, have_langchain), flush=True)
if is_public:
allow_upload_to_user_data = False
input_lines = 1 # ensure set, for ease of use
temperature = 0.2 if temperature is None else temperature
top_p = 0.85 if top_p is None else top_p
top_k = 70 if top_k is None else top_k
if is_hf:
do_sample = True if do_sample is None else do_sample
top_k_docs = 3 if top_k_docs is None else top_k_docs
else:
# by default don't sample, too chatty
do_sample = False if do_sample is None else do_sample
top_k_docs = 4 if top_k_docs is None else top_k_docs
if memory_restriction_level == 2:
if not base_model and not inference_server and not model_lock:
base_model = 'h2oai/h2ogpt-oasst1-512-12b'
# don't set load_8bit if passed base_model, doesn't always work so can't just override
load_8bit = True
load_4bit = False # FIXME - consider using 4-bit instead of 8-bit
elif not inference_server:
top_k_docs = 10 if top_k_docs is None else top_k_docs
if memory_restriction_level >= 2:
load_8bit = True
load_4bit = False # FIXME - consider using 4-bit instead of 8-bit
if hf_embedding_model is None:
hf_embedding_model = "sentence-transformers/all-MiniLM-L6-v2"
top_k_docs = 3 if top_k_docs is None else top_k_docs
if top_k_docs is None:
top_k_docs = 3
if is_public:
if not max_time:
max_time = 60 * 2
if not max_max_time:
max_max_time = max_time
if not max_new_tokens:
max_new_tokens = 256
if not max_max_new_tokens:
max_max_new_tokens = 512
else:
if not max_max_time:
max_max_time = 60 * 20
if not max_max_new_tokens:
max_max_new_tokens = 1024
if is_hf:
# must override share if in spaces
share = False
if not max_time:
max_time = 60 * 1
if not max_max_time:
max_max_time = max_time
# HF accounted for later in get_max_max_new_tokens()
save_dir = os.getenv('SAVE_DIR', save_dir)
score_model = os.getenv('SCORE_MODEL', score_model)
if str(score_model) == 'None':
score_model = ''
concurrency_count = int(os.getenv('CONCURRENCY_COUNT', concurrency_count))
api_open = bool(int(os.getenv('API_OPEN', str(int(api_open)))))
allow_api = bool(int(os.getenv('ALLOW_API', str(int(allow_api)))))
n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0
n_gpus, gpu_ids = cuda_vis_check(n_gpus)
if n_gpus == 0:
print("No GPUs detected", flush=True)
enable_captions = False
gpu_id = None
load_8bit = False
load_4bit = False
load_half = False
load_gptq = ''
load_exllama = False
use_safetensors = False
revision = None
use_gpu_id = False
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = False
torch.set_default_dtype(torch.float32)
if psutil.virtual_memory().available < 94 * 1024 ** 3 and not inference_server and not model_lock:
# 12B uses ~94GB
# 6.9B uses ~47GB
base_model = 'h2oai/h2ogpt-oig-oasst1-512-6_9b' if not base_model else base_model
if hf_embedding_model is None:
# if no GPUs, use simpler embedding model to avoid cost in time
hf_embedding_model = "sentence-transformers/all-MiniLM-L6-v2"
if score_model == 'auto':
score_model = ''
else:
if score_model == 'auto':
if n_gpus >= 2:
# will by default place scoring model on last GPU
score_model = 'OpenAssistant/reward-model-deberta-v3-large-v2'
else:
score_model = ''
if hf_embedding_model is None:
# if still None, then set default
hf_embedding_model = 'hkunlp/instructor-large'
# get defaults
if base_model:
model_lower = base_model.lower()
elif model_lock:
# have 0th model be thought of as normal model
assert len(model_lock) > 0 and model_lock[0]['base_model']
model_lower = model_lock[0]['base_model'].lower()
else:
model_lower = ''
if not gradio:
# force, else not single response like want to look at
stream_output = False
# else prompt removal can mess up output
chat = False
# hard-coded defaults
first_para = False
text_limit = None
if offload_folder:
offload_folder = makedirs(offload_folder, exist_ok=True, tmp_ok=True)
placeholder_instruction, placeholder_input, \
stream_output, show_examples, \
prompt_type, prompt_dict, \
temperature, top_p, top_k, num_beams, \
max_new_tokens, min_new_tokens, early_stopping, max_time, \
repetition_penalty, num_return_sequences, \
do_sample, \
src_lang, tgt_lang, \
examples, \
task_info = \
get_generate_params(model_lower,
chat,
stream_output, show_examples,
prompt_type, prompt_dict,
temperature, top_p, top_k, num_beams,
max_new_tokens, min_new_tokens, early_stopping, max_time,
repetition_penalty, num_return_sequences,
do_sample,
top_k_docs,
chunk,
chunk_size,
verbose,
)
git_hash = get_githash() if is_public or os.getenv('GET_GITHASH') else "GET_GITHASH"
locals_dict = locals()
locals_print = '\n'.join(['%s: %s' % (k, v) for k, v in locals_dict.items()])
if verbose:
print(f"Generating model with params:\n{locals_print}", flush=True)
print("Command: %s\nHash: %s" % (str(' '.join(sys.argv)), git_hash), flush=True)
if langchain_mode != "Disabled":
# SECOND PLACE where LangChain referenced, but all imports are kept local so not required
from gpt_langchain import prep_langchain, get_some_dbs_from_hf
if is_hf:
get_some_dbs_from_hf()
dbs = {}
for langchain_mode1 in visible_langchain_modes:
if langchain_mode1 in ['MyData']: # FIXME: Remove other custom temp dbs
# don't use what is on disk, remove it instead
for gpath1 in glob.glob(os.path.join(scratch_base_dir, 'db_dir_%s*' % langchain_mode1)):
if os.path.isdir(gpath1):
print("Removing old MyData: %s" % gpath1, flush=True)
remove(gpath1)
continue
if langchain_mode1 in ['All']:
# FIXME: All should be avoided until scans over each db, shouldn't be separate db
continue
persist_directory1 = 'db_dir_%s' % langchain_mode1 # single place, no special names for each case
try:
db = prep_langchain(persist_directory1,
load_db_if_exists,
db_type, use_openai_embedding,
langchain_mode1, langchain_mode_paths,
hf_embedding_model,
kwargs_make_db=locals())
finally:
# in case updated embeddings or created new embeddings
clear_torch_cache()
dbs[langchain_mode1] = db
# remove None db's so can just rely upon k in dbs for if hav db
dbs = {k: v for k, v in dbs.items() if v is not None}
else:
dbs = {}
# import control
if os.environ.get("TEST_LANGCHAIN_IMPORT"):
assert 'gpt_langchain' not in sys.modules, "Dev bug, import of langchain when should not have"
assert 'langchain' not in sys.modules, "Dev bug, import of langchain when should not have"
model_state_none = dict(model=None, tokenizer=None, device=None,
base_model=None, tokenizer_base_model=None, lora_weights=None,
inference_server=None, prompt_type=None, prompt_dict=None)
my_db_state0 = {LangChainMode.MY_DATA.value: [None, None]}
selection_docs_state0 = dict(visible_langchain_modes=visible_langchain_modes,
langchain_mode_paths=langchain_mode_paths,
langchain_modes=langchain_modes)
selection_docs_state = selection_docs_state0
langchain_modes0 = langchain_modes
langchain_mode_paths0 = langchain_mode_paths
visible_langchain_modes0 = visible_langchain_modes
if cli:
from cli import run_cli
return run_cli(**get_kwargs(run_cli, exclude_names=['model_state0'], **locals()))
elif not gradio:
from eval import run_eval
return run_eval(**get_kwargs(run_eval, exclude_names=['model_state0'], **locals()))
elif gradio:
# imported here so don't require gradio to run generate
from gradio_runner import go_gradio
# get default model
model_states = []
model_list = [dict(base_model=base_model, tokenizer_base_model=tokenizer_base_model, lora_weights=lora_weights,
inference_server=inference_server, prompt_type=prompt_type, prompt_dict=prompt_dict)]
model_list0 = copy.deepcopy(model_list) # just strings, safe to deepcopy
model_state0 = model_state_none.copy()
assert len(model_state_none) == len(model_state0)
if model_lock:
model_list = model_lock
for model_dict in reversed(model_list):
# do reverse, so first is default base_model etc., so some logic works in go_gradio() more easily
# handles defaults user didn't have to pass
model_dict['base_model'] = base_model1 = model_dict.get('base_model', '')
model_dict['tokenizer_base_model'] = tokenizer_base_model1 = model_dict.get('tokenizer_base_model', '')
model_dict['lora_weights'] = lora_weights1 = model_dict.get('lora_weights', '')
model_dict['inference_server'] = inference_server1 = model_dict.get('inference_server', '')
prompt_type1 = model_dict.get('prompt_type', model_list0[0]['prompt_type']) # don't use mutated value
# try to infer, ignore empty initial state leading to get_generate_params -> 'plain'
if model_dict.get('prompt_type') is None:
model_lower1 = base_model1.lower()
if model_lower1 in inv_prompt_type_to_model_lower:
prompt_type1 = inv_prompt_type_to_model_lower[model_lower1]
prompt_dict1, error0 = get_prompt(prompt_type1, '',
chat=False, context='', reduced=False, making_context=False,
return_dict=True)
else:
prompt_dict1 = prompt_dict
else:
prompt_dict1 = prompt_dict
model_dict['prompt_type'] = prompt_type1
model_dict['prompt_dict'] = prompt_dict1 = model_dict.get('prompt_dict', prompt_dict1)
all_kwargs = locals().copy()
all_kwargs.update(dict(base_model=base_model1, tokenizer_base_model=tokenizer_base_model1,
lora_weights=lora_weights1, inference_server=inference_server1))
if base_model1 and not login_mode_if_model0:
model0, tokenizer0, device = get_model(reward_type=False,
**get_kwargs(get_model, exclude_names=['reward_type'],
**all_kwargs))
else:
# if empty model, then don't load anything, just get gradio up
model0, tokenizer0, device = None, None, None
if model0 is None:
if fail_if_cannot_connect:
raise RuntimeError("Could not connect, see logs")
# skip
if isinstance(model_lock, list):
model_lock.remove(model_dict)
continue
model_state_trial = dict(model=model0, tokenizer=tokenizer0, device=device)
model_state_trial.update(model_dict)
assert len(model_state_none) == len(model_state_trial)
print("Model %s" % model_dict, flush=True)
if model_lock:
# last in iteration will be first
model_states.insert(0, model_state_trial)
# fill model_state0 so go_gradio() easier, manage model_states separately
model_state0 = model_state_trial.copy()
else:
model_state0 = model_state_trial.copy()
assert len(model_state_none) == len(model_state0)
# get score model
all_kwargs = locals().copy()
smodel, stokenizer, sdevice = get_score_model(reward_type=True,
**get_kwargs(get_score_model, exclude_names=['reward_type'],
**all_kwargs))
score_model_state0 = dict(model=smodel, tokenizer=stokenizer, device=sdevice,
base_model=score_model, tokenizer_base_model='', lora_weights='',
inference_server='', prompt_type='', prompt_dict='')
if enable_captions:
if pre_load_caption_model:
from image_captions import H2OImageCaptionLoader
caption_loader = H2OImageCaptionLoader(caption_gpu=caption_gpu).load_model()
else:
caption_loader = 'gpu' if caption_gpu else 'cpu'
else:
caption_loader = False
# assume gradio needs everything
go_gradio(**locals())
def get_config(base_model,
               use_auth_token=False,
               trust_remote_code=True,
               offload_folder=None,
               revision=None,
               rope_scaling=None,
               triton_attn=False,
               long_sequence=True,
               return_model=False,
               raise_exception=False,
               max_seq_len=None,
               verbose=False,
               ):
    """Load the HF config for base_model and determine the effective max sequence length.

    :param base_model: HF hub name or local path; may also be a non-HF model name (e.g. llama)
    :param use_auth_token: HF auth token for private models
    :param trust_remote_code: allow custom model code from the hub
    :param offload_folder: disk-offload folder passed through to HF
    :param revision: model revision/branch
    :param rope_scaling: dict with 'factor' (HF transformers) or 'alpha_value' (exllama)
        used to scale the inferred max_seq_len
    :param triton_attn: use triton attention implementation (MPT models only)
    :param long_sequence: apply known long-context overrides for specific MPT models
    :param return_model: also instantiate an empty-weights model from the config,
        for later device-map inference
    :param raise_exception: re-raise OSError from config fetch instead of falling back
    :param max_seq_len: explicit override; inferred from the config when None
    :param verbose: print extra info about inferred lengths
    :return: (config, model, max_seq_len); config and model are None for non-HF models
    """
    from accelerate import init_empty_weights
    with init_empty_weights():
        from transformers import AutoConfig
        try:
            config = AutoConfig.from_pretrained(base_model, use_auth_token=use_auth_token,
                                                trust_remote_code=trust_remote_code,
                                                offload_folder=offload_folder,
                                                revision=revision,
                                                rope_scaling=rope_scaling)
        except OSError as e:
            if raise_exception:
                raise
            if 'not a local folder and is not a valid model identifier listed on' in str(
                    e) or '404 Client Error' in str(e):
                # e.g. llama, gpjt, etc.
                # e.g. HF TGI but not model on HF or private etc.
                if max_seq_len is None and base_model.lower() in non_hf_types:
                    print("Could not determine --max_seq_len, setting to 2048. Pass if not correct", flush=True)
                    max_seq_len = 2048
                # HF TGI server only should really require prompt_type, not HF model state
                return None, None, max_seq_len
            else:
                raise
    if triton_attn and 'mpt-' in base_model.lower():
        config.attn_config['attn_impl'] = 'triton'
    if long_sequence:
        # known-good long-context settings for specific MPT checkpoints
        if 'mpt-7b-storywriter' in base_model.lower():
            config.update({"max_seq_len": 83968})
        if 'mosaicml/mpt-7b-chat' in base_model.lower():
            config.update({"max_seq_len": 4096})
        if 'mpt-30b' in base_model.lower():
            config.update({"max_seq_len": 2 * 8192})
    if return_model and \
            issubclass(config.__class__, tuple(AutoModel._model_mapping.keys())):
        # empty-weights model only, used downstream for infer_auto_device_map()
        model = AutoModel.from_config(
            config,
            trust_remote_code=trust_remote_code,
        )
    else:
        # can't infer
        model = None
    if 'falcon' in base_model.lower():
        config.use_cache = False
    # allow override
    if max_seq_len is not None:
        # BUG FIX: message used to print max_seq_len for both sides of "%d -> %d",
        # always showing identical values; report the config-derived value instead.
        config_seq_len = getattr(config, 'max_seq_len', None)
        if not isinstance(config_seq_len, int):
            config_seq_len = getattr(config, 'max_position_embeddings', None)
        if isinstance(config_seq_len, int) and config_seq_len != max_seq_len:
            print("Overriding max_seq_len %d -> %d" % (config_seq_len, max_seq_len), flush=True)
        else:
            print("Using passed max_seq_len=%d" % max_seq_len, flush=True)
    else:
        if hasattr(config, 'max_seq_len'):
            max_seq_len = int(config.max_seq_len)
        elif hasattr(config, 'max_position_embeddings') and isinstance(config.max_position_embeddings, int):
            # help automatically limit inputs to generate
            max_seq_len = config.max_position_embeddings
            if verbose:
                print("Used max_position_embeddings=%s as base model (pre-rope) max_seq_len."
                      " If not desired, pass --max_seq_len and set to some integer value." % config.max_position_embeddings,
                      flush=True)
        else:
            print("Could not determine --max_seq_len, setting to 2048. Pass if not correct", flush=True)
            max_seq_len = 2048
            # FIXME:
            # raise RuntimeError("Could not determine max_seq_len,"
            #                    " please pass --max_seq_len and set to some value, e.g. 2048.")
    if rope_scaling:
        if rope_scaling.get('factor'):
            # HF transformers
            max_seq_len *= rope_scaling.get('factor')
        elif rope_scaling.get('alpha_value'):
            # exllama
            # Note: exllama's own tokenizer has this set correctly in loaders.py, this config will be unused
            max_seq_len *= rope_scaling.get('alpha_value')
        # scaling factors are often floats; keep max_seq_len an int for downstream length math
        max_seq_len = int(max_seq_len)
        print("Automatically setting max_seq_len=%d for RoPE scaling" % max_seq_len, flush=True)
    return config, model, max_seq_len
def get_non_lora_model(base_model, model_loader, load_half,
                       load_gptq,
                       load_exllama,
                       use_safetensors,
                       revision,
                       model_kwargs, reward_type,
                       config, model,
                       gpu_id=0,
                       ):
    """
    Ensure model gets on correct device

    :param base_model: HF hub name or local path of the model to load
    :param model_loader: HF loader class, or (for exllama) the already-built model object
    :param load_half: load in fp16 (only applies when not quantized)
    :param load_gptq: GPTQ weights basename; '' disables GPTQ loading
    :param load_exllama: model_loader is already the exllama model; returned as-is
    :param use_safetensors: prefer safetensors weights
    :param revision: model revision/branch
    :param model_kwargs: kwargs passed to the loader; mutated in place (device_map etc.)
    :param reward_type: True for scoring/reward model, which is placed on the last GPU
    :param config: HF config used for non-GPTQ HF loads
    :param model: empty-weights model for device-map inference, or None to use "auto"
    :param gpu_id: target GPU id; -1 means generic 'cuda' placement
    :return: the loaded model on the chosen device(s)
    """
    if model is not None:
        # NOTE: Can specify max_memory={0: max_mem, 1: max_mem}, to shard model
        # NOTE: Some models require avoiding sharding some layers,
        # then would pass no_split_module_classes and give list of those layers.
        from accelerate import infer_auto_device_map
        device_map = infer_auto_device_map(
            model,
            dtype=torch.float16 if load_half else torch.float32,
        )
        if hasattr(model, 'model'):
            device_map_model = infer_auto_device_map(
                model.model,
                dtype=torch.float16 if load_half else torch.float32,
            )
            device_map.update(device_map_model)
    else:
        device_map = "auto"

    # BUG FIX: was `torch.cuda.is_available` without the call; a bound method is
    # always truthy so the guard never took the 0 branch (harmless only because
    # device_count() itself returns 0 without CUDA).
    n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0

    if n_gpus > 0:
        if gpu_id >= 0:
            # FIXME: If really distributes model, tend to get things like: ValueError: gpt_neox.embed_in.weight doesn't have any device set.
            # So avoid for now, just put on first GPU, unless score_model, put on last
            if reward_type:
                device_map = {'': n_gpus - 1}
            else:
                device_map = {'': min(n_gpus - 1, gpu_id)}
        if gpu_id == -1:
            device_map = {'': 'cuda'}
    else:
        # no GPUs: force CPU placement and disable bitsandbytes quantization
        device_map = {'': 'cpu'}
        model_kwargs['load_in_8bit'] = False
        model_kwargs['load_in_4bit'] = False
    print('device_map: %s' % device_map, flush=True)

    load_in_8bit = model_kwargs.get('load_in_8bit', False)
    load_in_4bit = model_kwargs.get('load_in_4bit', False)
    model_kwargs['device_map'] = device_map
    model_kwargs['use_safetensors'] = use_safetensors
    model_kwargs['revision'] = revision
    pop_unused_model_kwargs(model_kwargs)

    if load_exllama:
        # already constructed elsewhere; nothing to load
        model = model_loader
    elif load_gptq:
        if 'Llama-2-70B-chat-GPTQ' in base_model:
            model_kwargs.update(dict(inject_fused_attention=False))
        # GPTQ loader manages dtype/device itself
        model_kwargs.pop('torch_dtype', None)
        model_kwargs.pop('device_map')
        model = model_loader(
            model_name_or_path=base_model,
            model_basename=load_gptq,
            **model_kwargs,
        )
    elif load_in_8bit or load_in_4bit or not load_half:
        # quantized loads (and full-precision) must not call .half()
        model = model_loader(
            base_model,
            config=config,
            **model_kwargs,
        )
    else:
        model = model_loader(
            base_model,
            config=config,
            **model_kwargs,
        ).half()
    return model
def get_client_from_inference_server(inference_server, base_model=None, raise_connection_exception=False):
inference_server, headers = get_hf_server(inference_server)
# preload client since slow for gradio case especially
from gradio_utils.grclient import GradioClient
gr_client = None
hf_client = None
if headers is None:
try:
print("GR Client Begin: %s %s" % (inference_server, base_model), flush=True)
# first do sanity check if alive, else gradio client takes too long by default
requests.get(inference_server, timeout=int(os.getenv('REQUEST_TIMEOUT', '30')))
gr_client = GradioClient(inference_server)
print("GR Client End: %s" % inference_server, flush=True)
except (OSError, ValueError) as e:
# Occurs when wrong endpoint and should have been HF client, so don't hard raise, just move to HF
gr_client = None
print("GR Client Failed %s %s: %s" % (inference_server, base_model, str(e)), flush=True)
except (ConnectTimeoutError, ConnectTimeout, MaxRetryError, ConnectionError, ConnectionError2,
JSONDecodeError, ReadTimeout2, KeyError) as e:
t, v, tb = sys.exc_info()
ex = ''.join(traceback.format_exception(t, v, tb))