Mirror of https://www.modelscope.cn/black-forest-labs/FLUX.1-dev.git (synced 2026-04-03 00:32:53 +08:00)
Add diffusers format weights (#3)
- Add diffusers format weights (5d4717a3dc82fa40a286f48bb71f1ead5c517800)
- Update README with diffusers usage notes (97a16425ff5c88f218d76851f32820e703ea813d)
- Update README.md (9ae394376d269f108989e648378f3ee54cfe7d7b)
- Upload folder using huggingface_hub (f2a94b28d7167624c443a40aaa0334351bd62c02)
- Update scheduler (fe1bfe0b434c9a621a4d4ddad4f57204dcf55d23)

Co-authored-by: Dhruv Nair <dn6@users.noreply.huggingface.co>
Co-authored-by: Apolinário from multimodal AI art <multimodalart@users.noreply.huggingface.co>
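With the weights now in diffusers format, the checkpoint can be loaded end to end with FluxPipeline. A minimal sketch (assumes the diffusers and torch packages and access to the black-forest-labs/FLUX.1-dev checkpoint, via the Hub or a local path to this repo):

import torch
from diffusers import FluxPipeline

# Load the diffusers-format checkpoint added by this commit.
# bfloat16 keeps the ~12B-parameter transformer within reach of a single GPU.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # optional: lower VRAM use at some speed cost

image = pipe(
    "a photo of a misty forest at dawn",
    num_inference_steps=50,
    guidance_scale=3.5,
).images[0]
image.save("flux-dev.png")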
tokenizer/merges.txt (new file, 48895 lines)
File diff suppressed because it is too large
tokenizer/special_tokens_map.json (new file, 30 lines)
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
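These definitions are what transformers reads when the tokenizer is loaded from this subfolder; a minimal sketch for inspecting them (assumes the transformers package and access to the checkpoint):

from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="tokenizer"
)
print(tok.bos_token, tok.bos_token_id)  # <|startoftext|> 49406
print(tok.eos_token, tok.eos_token_id)  # <|endoftext|> 49407
print(tok.pad_token == tok.eos_token)   # True: padding reuses <|endoftext|>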
tokenizer/tokenizer_config.json (new file, 30 lines)
@@ -0,0 +1,30 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "49406": {
      "content": "<|startoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49407": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|startoftext|>",
  "clean_up_tokenization_spaces": true,
  "do_lower_case": true,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 77,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": "<|endoftext|>"
}
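The config pins model_max_length to 77 (CLIP's context window), lowercases input via do_lower_case, and pads with <|endoftext|>. A sketch of how a prompt is typically encoded under these settings (assumes the transformers and torch packages):

from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="tokenizer"
)
enc = tok(
    "A Photo Of A Forest",            # lowercased because do_lower_case is true
    padding="max_length",             # pad with <|endoftext|> up to 77 tokens
    max_length=tok.model_max_length,  # 77
    truncation=True,
    return_tensors="pt",
)
print(enc.input_ids.shape)  # torch.Size([1, 77])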
tokenizer/vocab.json (new file, 49410 lines)
File diff suppressed because it is too large
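vocab.json maps BPE token strings to ids (49408 entries plus the two brace lines account for the 49410-line diff), and merges.txt lists the ordered BPE merge rules that, together with the vocab, define the CLIP tokenizer. A quick sanity check over a local checkout (a sketch; the file path assumes you run it from the repo root):

import json

# Read the vocabulary added by this commit and confirm its shape.
with open("tokenizer/vocab.json") as f:
    vocab = json.load(f)

print(len(vocab))                # 49408 entries, ids 0-49407
print(vocab["<|startoftext|>"])  # 49406
print(vocab["<|endoftext|>"])    # 49407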