Upload to Wan-AI/Wan2.2-Animate-14B on ModelScope hub

commit 9f0313da34
parent 777afb6458
Author: kelseye
Date: 2025-09-11 12:37:22 +00:00

34 changed files with 5148 additions and 0 deletions

.gitattributes (vendored, +22)
@@ -45,3 +45,25 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
models_t5_umt5-xxl-enc-bf16.pth filter=lfs diff=lfs merge=lfs -text
google/umt5-xxl/tokenizer.json filter=lfs diff=lfs merge=lfs -text
Wan2.1_VAE.pth filter=lfs diff=lfs merge=lfs -text
google/umt5-xxl/spiece.model filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/flax_model.msgpack filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/model.safetensors filter=lfs diff=lfs merge=lfs -text
diffusion_pytorch_model-00004-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/onnx/tokenizer.json filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/onnx/sentencepiece.bpe.model filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/tf_model.h5 filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/sentencepiece.bpe.model filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/onnx/model.onnx_data filter=lfs diff=lfs merge=lfs -text
diffusion_pytorch_model-00001-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
diffusion_pytorch_model-00003-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
diffusion_pytorch_model-00002-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
relighting_lora/adapter_model.safetensors filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/onnx/model.onnx filter=lfs diff=lfs merge=lfs -text
xlm-roberta-large/tokenizer.json filter=lfs diff=lfs merge=lfs -text
models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth filter=lfs diff=lfs merge=lfs -text
relighting_lora.ckpt filter=lfs diff=lfs merge=lfs -text

Wan2.1_VAE.pth (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:acf6c5aa49ad281d4b561e10656e2397c446a8ba4b8d8f19d3dd125c2628bc6a
size 507609928
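
The entry above is not the VAE weights themselves but a Git LFS pointer: a three-line stub (spec version, SHA-256 object ID, byte size) that `git lfs` resolves to the real file on checkout. The other large files in this commit use the same format. A minimal sketch of reading such a pointer, assuming the three-line layout shown here:

```python
# Minimal sketch: parse a Git LFS pointer file like the one above.
# Assumes the "version / oid / size" layout from the LFS v1 spec; the
# path is just this diff's example file.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        if key:
            fields[key] = value
    fields["size"] = int(fields["size"])
    return fields

ptr = parse_lfs_pointer("Wan2.1_VAE.pth")
print(ptr["oid"], f"{ptr['size'] / 1e9:.2f} GB")  # ~0.51 GB for this VAE
```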

config.json (new file, +30)
@@ -0,0 +1,30 @@
{
"__name__": "Config: Transformer config for WanAnimateModel",
"_class_name": "WanAnimateModel",
"_diffusers_version": "0.33.1",
"_name_or_path": "Wan-animate",
"cross_attn_norm": true,
"dim": 5120,
"eps": 1e-06,
"ffn_dim": 13824,
"freq_dim": 256,
"in_dim": 36,
"motion_encoder_dim": 512,
"num_heads": 40,
"num_layers": 40,
"out_dim": 16,
"patch_size": [
1,
2,
2
],
"qk_norm": true,
"text_dim": 4096,
"text_len": 512,
"use_context_parallel": false,
"use_img_emb": true,
"window_size": [
-1,
-1
]
}
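
The config pins the WanAnimate DiT geometry: 40 layers of width 5120 with 40 attention heads (so 128 dims per head), a 13824-wide FFN, and a 1×2×2 patchification of the video latent. A hedged sketch that loads the file and derives those quantities (the derived names are illustrative, not fields the model code necessarily uses):

```python
# Sanity-check sketch over the WanAnimateModel config above; assumes
# config.json sits in the current directory.
import json

with open("config.json") as f:
    cfg = json.load(f)

assert cfg["dim"] % cfg["num_heads"] == 0
head_dim = cfg["dim"] // cfg["num_heads"]   # 5120 / 40 = 128
t, h, w = cfg["patch_size"]                 # (1, 2, 2) latent patching
print(f"{cfg['num_layers']} layers, head_dim={head_dim}, "
      f"ffn_ratio={cfg['ffn_dim'] / cfg['dim']:.2f}, patch=({t},{h},{w})")
```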

diffusion_pytorch_model-00001-of-00004.safetensors (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:575c2dba750c3b40240fb742a4224453aa97dfbd3c5f5a0086be431cdefdd69c
size 9875257880

diffusion_pytorch_model-00002-of-00004.safetensors (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b90b820627d43eeeb1ae0489182f9a8c870374fd72cc99dccb9eddfc2ace8325
size 9975378288

diffusion_pytorch_model-00003-of-00004.safetensors (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e2aa343b0ba04f563566e9959a439611a18189cd9accdc04d59681be9ce5be50
size 9954400528

diffusion_pytorch_model-00004-of-00004.safetensors (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5fea7f38aa4cb70ed59a9ecf406d0c05f3dfc85ee70bc83de35679c42642d1b
size 4744748472

diffusion_pytorch_model.safetensors.index.json (new file; diff suppressed because it is too large)

google/umt5-xxl/special_tokens_map.json (new file, +308)
@@ -0,0 +1,308 @@
{
"additional_special_tokens": [
"<extra_id_0>",
"<extra_id_1>",
"<extra_id_2>",
"<extra_id_3>",
"<extra_id_4>",
"<extra_id_5>",
"<extra_id_6>",
"<extra_id_7>",
"<extra_id_8>",
"<extra_id_9>",
"<extra_id_10>",
"<extra_id_11>",
"<extra_id_12>",
"<extra_id_13>",
"<extra_id_14>",
"<extra_id_15>",
"<extra_id_16>",
"<extra_id_17>",
"<extra_id_18>",
"<extra_id_19>",
"<extra_id_20>",
"<extra_id_21>",
"<extra_id_22>",
"<extra_id_23>",
"<extra_id_24>",
"<extra_id_25>",
"<extra_id_26>",
"<extra_id_27>",
"<extra_id_28>",
"<extra_id_29>",
"<extra_id_30>",
"<extra_id_31>",
"<extra_id_32>",
"<extra_id_33>",
"<extra_id_34>",
"<extra_id_35>",
"<extra_id_36>",
"<extra_id_37>",
"<extra_id_38>",
"<extra_id_39>",
"<extra_id_40>",
"<extra_id_41>",
"<extra_id_42>",
"<extra_id_43>",
"<extra_id_44>",
"<extra_id_45>",
"<extra_id_46>",
"<extra_id_47>",
"<extra_id_48>",
"<extra_id_49>",
"<extra_id_50>",
"<extra_id_51>",
"<extra_id_52>",
"<extra_id_53>",
"<extra_id_54>",
"<extra_id_55>",
"<extra_id_56>",
"<extra_id_57>",
"<extra_id_58>",
"<extra_id_59>",
"<extra_id_60>",
"<extra_id_61>",
"<extra_id_62>",
"<extra_id_63>",
"<extra_id_64>",
"<extra_id_65>",
"<extra_id_66>",
"<extra_id_67>",
"<extra_id_68>",
"<extra_id_69>",
"<extra_id_70>",
"<extra_id_71>",
"<extra_id_72>",
"<extra_id_73>",
"<extra_id_74>",
"<extra_id_75>",
"<extra_id_76>",
"<extra_id_77>",
"<extra_id_78>",
"<extra_id_79>",
"<extra_id_80>",
"<extra_id_81>",
"<extra_id_82>",
"<extra_id_83>",
"<extra_id_84>",
"<extra_id_85>",
"<extra_id_86>",
"<extra_id_87>",
"<extra_id_88>",
"<extra_id_89>",
"<extra_id_90>",
"<extra_id_91>",
"<extra_id_92>",
"<extra_id_93>",
"<extra_id_94>",
"<extra_id_95>",
"<extra_id_96>",
"<extra_id_97>",
"<extra_id_98>",
"<extra_id_99>",
"<extra_id_100>",
"<extra_id_101>",
"<extra_id_102>",
"<extra_id_103>",
"<extra_id_104>",
"<extra_id_105>",
"<extra_id_106>",
"<extra_id_107>",
"<extra_id_108>",
"<extra_id_109>",
"<extra_id_110>",
"<extra_id_111>",
"<extra_id_112>",
"<extra_id_113>",
"<extra_id_114>",
"<extra_id_115>",
"<extra_id_116>",
"<extra_id_117>",
"<extra_id_118>",
"<extra_id_119>",
"<extra_id_120>",
"<extra_id_121>",
"<extra_id_122>",
"<extra_id_123>",
"<extra_id_124>",
"<extra_id_125>",
"<extra_id_126>",
"<extra_id_127>",
"<extra_id_128>",
"<extra_id_129>",
"<extra_id_130>",
"<extra_id_131>",
"<extra_id_132>",
"<extra_id_133>",
"<extra_id_134>",
"<extra_id_135>",
"<extra_id_136>",
"<extra_id_137>",
"<extra_id_138>",
"<extra_id_139>",
"<extra_id_140>",
"<extra_id_141>",
"<extra_id_142>",
"<extra_id_143>",
"<extra_id_144>",
"<extra_id_145>",
"<extra_id_146>",
"<extra_id_147>",
"<extra_id_148>",
"<extra_id_149>",
"<extra_id_150>",
"<extra_id_151>",
"<extra_id_152>",
"<extra_id_153>",
"<extra_id_154>",
"<extra_id_155>",
"<extra_id_156>",
"<extra_id_157>",
"<extra_id_158>",
"<extra_id_159>",
"<extra_id_160>",
"<extra_id_161>",
"<extra_id_162>",
"<extra_id_163>",
"<extra_id_164>",
"<extra_id_165>",
"<extra_id_166>",
"<extra_id_167>",
"<extra_id_168>",
"<extra_id_169>",
"<extra_id_170>",
"<extra_id_171>",
"<extra_id_172>",
"<extra_id_173>",
"<extra_id_174>",
"<extra_id_175>",
"<extra_id_176>",
"<extra_id_177>",
"<extra_id_178>",
"<extra_id_179>",
"<extra_id_180>",
"<extra_id_181>",
"<extra_id_182>",
"<extra_id_183>",
"<extra_id_184>",
"<extra_id_185>",
"<extra_id_186>",
"<extra_id_187>",
"<extra_id_188>",
"<extra_id_189>",
"<extra_id_190>",
"<extra_id_191>",
"<extra_id_192>",
"<extra_id_193>",
"<extra_id_194>",
"<extra_id_195>",
"<extra_id_196>",
"<extra_id_197>",
"<extra_id_198>",
"<extra_id_199>",
"<extra_id_200>",
"<extra_id_201>",
"<extra_id_202>",
"<extra_id_203>",
"<extra_id_204>",
"<extra_id_205>",
"<extra_id_206>",
"<extra_id_207>",
"<extra_id_208>",
"<extra_id_209>",
"<extra_id_210>",
"<extra_id_211>",
"<extra_id_212>",
"<extra_id_213>",
"<extra_id_214>",
"<extra_id_215>",
"<extra_id_216>",
"<extra_id_217>",
"<extra_id_218>",
"<extra_id_219>",
"<extra_id_220>",
"<extra_id_221>",
"<extra_id_222>",
"<extra_id_223>",
"<extra_id_224>",
"<extra_id_225>",
"<extra_id_226>",
"<extra_id_227>",
"<extra_id_228>",
"<extra_id_229>",
"<extra_id_230>",
"<extra_id_231>",
"<extra_id_232>",
"<extra_id_233>",
"<extra_id_234>",
"<extra_id_235>",
"<extra_id_236>",
"<extra_id_237>",
"<extra_id_238>",
"<extra_id_239>",
"<extra_id_240>",
"<extra_id_241>",
"<extra_id_242>",
"<extra_id_243>",
"<extra_id_244>",
"<extra_id_245>",
"<extra_id_246>",
"<extra_id_247>",
"<extra_id_248>",
"<extra_id_249>",
"<extra_id_250>",
"<extra_id_251>",
"<extra_id_252>",
"<extra_id_253>",
"<extra_id_254>",
"<extra_id_255>",
"<extra_id_256>",
"<extra_id_257>",
"<extra_id_258>",
"<extra_id_259>",
"<extra_id_260>",
"<extra_id_261>",
"<extra_id_262>",
"<extra_id_263>",
"<extra_id_264>",
"<extra_id_265>",
"<extra_id_266>",
"<extra_id_267>",
"<extra_id_268>",
"<extra_id_269>",
"<extra_id_270>",
"<extra_id_271>",
"<extra_id_272>",
"<extra_id_273>",
"<extra_id_274>",
"<extra_id_275>",
"<extra_id_276>",
"<extra_id_277>",
"<extra_id_278>",
"<extra_id_279>",
"<extra_id_280>",
"<extra_id_281>",
"<extra_id_282>",
"<extra_id_283>",
"<extra_id_284>",
"<extra_id_285>",
"<extra_id_286>",
"<extra_id_287>",
"<extra_id_288>",
"<extra_id_289>",
"<extra_id_290>",
"<extra_id_291>",
"<extra_id_292>",
"<extra_id_293>",
"<extra_id_294>",
"<extra_id_295>",
"<extra_id_296>",
"<extra_id_297>",
"<extra_id_298>",
"<extra_id_299>"
],
"bos_token": "<s>",
"eos_token": "</s>",
"pad_token": "<pad>",
"unk_token": "<unk>"
}
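
The 300 `<extra_id_N>` entries are umT5's sentinel tokens and follow a strict pattern, so the file can be verified programmatically; a small sketch, assuming the repo's directory layout:

```python
# Sketch: regenerate the sentinel list and check it against the file above.
import json

with open("google/umt5-xxl/special_tokens_map.json") as f:
    special = json.load(f)

assert special["additional_special_tokens"] == [f"<extra_id_{i}>" for i in range(300)]
assert (special["bos_token"], special["eos_token"]) == ("<s>", "</s>")
print("300 sentinel tokens present; pad =", special["pad_token"])
```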

google/umt5-xxl/spiece.model (binary, new file, stored with Git LFS)

google/umt5-xxl/tokenizer.json (binary, new file, stored with Git LFS)

google/umt5-xxl/tokenizer_config.json (new file; diff suppressed because it is too large)

models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth (binary, new file, stored with Git LFS)

models_t5_umt5-xxl-enc-bf16.pth (binary, new file, stored with Git LFS)

relighting_lora.ckpt (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:49fe0a5a5af97c3f410598ddb146b5f4b3f30b2a526ec8cc32e9f032a1d328f7
size 2873429974

relighting_lora/README.md (new file, +201)
@@ -0,0 +1,201 @@
---
library_name: peft
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
### Framework versions
- PEFT 0.14.0

relighting_lora/adapter_config.json (new file, +41)
@@ -0,0 +1,41 @@
{
"alpha_pattern": {},
"auto_mapping": {
"base_model_class": "WanxiangI2VHumanOmniArch",
"parent_library": "hfm.archs.wanxiang_i2v.wanxiang_i2v_human_omni_arch"
},
"base_model_name_or_path": null,
"bias": "none",
"eva_config": null,
"exclude_modules": null,
"fan_in_fan_out": false,
"inference_mode": true,
"init_lora_weights": "gaussian",
"layer_replication": null,
"layers_pattern": null,
"layers_to_transform": null,
"loftq_config": {},
"lora_alpha": 128,
"lora_bias": false,
"lora_dropout": 0.0,
"megatron_config": null,
"megatron_core": "megatron.core",
"modules_to_save": null,
"peft_type": "LORA",
"r": 128,
"rank_pattern": {},
"revision": null,
"target_modules": [
"ffn.0",
"k",
"q",
"v",
"ffn.2",
"v_img",
"o",
"k_img"
],
"task_type": null,
"use_dora": false,
"use_rslora": false
}
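
The adapter config above is a standard PEFT LoRA recipe: rank 128 with alpha 128 (so an effective scale of 1.0), Gaussian init, no dropout, applied to the attention projections (including the image cross-attention `k_img`/`v_img`) and both FFN layers. A hedged reconstruction with `peft`; the referenced base class `WanxiangI2VHumanOmniArch` lives in the authors' internal training code, so the base model below is a placeholder:

```python
# Hedged sketch: a peft.LoraConfig equivalent to the adapter_config.json
# above. The real base model class is internal; plug in your own module.
from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=128,
    lora_alpha=128,            # alpha == r, so the effective scale is 1.0
    lora_dropout=0.0,
    bias="none",
    init_lora_weights="gaussian",
    target_modules=["q", "k", "v", "o", "k_img", "v_img", "ffn.0", "ffn.2"],
)
# peft_model = get_peft_model(base_model, lora_config)  # base_model: the Wan DiT
```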

relighting_lora/adapter_model.safetensors (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e821a68a66964c4c00dc8b5d06bfc8cc7e24ab77316c547eb41174097394899c
size 2873221376

xlm-roberta-large/README.md (new file, +200)
@@ -0,0 +1,200 @@
---
tags:
- exbert
language:
- multilingual
- af
- am
- ar
- as
- az
- be
- bg
- bn
- br
- bs
- ca
- cs
- cy
- da
- de
- el
- en
- eo
- es
- et
- eu
- fa
- fi
- fr
- fy
- ga
- gd
- gl
- gu
- ha
- he
- hi
- hr
- hu
- hy
- id
- is
- it
- ja
- jv
- ka
- kk
- km
- kn
- ko
- ku
- ky
- la
- lo
- lt
- lv
- mg
- mk
- ml
- mn
- mr
- ms
- my
- ne
- nl
- no
- om
- or
- pa
- pl
- ps
- pt
- ro
- ru
- sa
- sd
- si
- sk
- sl
- so
- sq
- sr
- su
- sv
- sw
- ta
- te
- th
- tl
- tr
- ug
- uk
- ur
- uz
- vi
- xh
- yi
- zh
license: mit
---
# XLM-RoBERTa (large-sized model)
XLM-RoBERTa model pre-trained on 2.5TB of filtered CommonCrawl data containing 100 languages. It was introduced in the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Conneau et al. and first released in [this repository](https://github.com/pytorch/fairseq/tree/master/examples/xlmr).
Disclaimer: The team releasing XLM-RoBERTa did not write a model card for this model, so this model card has been written by the Hugging Face team.
## Model description
XLM-RoBERTa is a multilingual version of RoBERTa. It is pre-trained on 2.5TB of filtered CommonCrawl data containing 100 languages.
RoBERTa is a transformers model pretrained on a large corpus in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data), using an automatic process to generate inputs and labels from those texts.
More precisely, it was pretrained with the masked language modeling (MLM) objective. Taking a sentence, the model randomly masks 15% of the words in the input, then runs the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs), which usually see the words one after the other, or from autoregressive models like GPT, which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence.
This way, the model learns an inner representation of 100 languages that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences for instance, you can train a standard classifier using the features produced by the XLM-RoBERTa model as inputs.
## Intended uses & limitations
You can use the raw model for masked language modeling, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?search=xlm-roberta) to look for fine-tuned versions on a task that interests you.
Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation, you should look at models like GPT2.
## Usage
You can use this model directly with a pipeline for masked language modeling:
```python
>>> from transformers import pipeline
>>> unmasker = pipeline('fill-mask', model='xlm-roberta-large')
>>> unmasker("Hello I'm a <mask> model.")
[{'score': 0.10563907772302628,
'sequence': "Hello I'm a fashion model.",
'token': 54543,
'token_str': 'fashion'},
{'score': 0.08015287667512894,
'sequence': "Hello I'm a new model.",
'token': 3525,
'token_str': 'new'},
{'score': 0.033413201570510864,
'sequence': "Hello I'm a model model.",
'token': 3299,
'token_str': 'model'},
{'score': 0.030217764899134636,
'sequence': "Hello I'm a French model.",
'token': 92265,
'token_str': 'French'},
{'score': 0.026436051353812218,
'sequence': "Hello I'm a sexy model.",
'token': 17473,
'token_str': 'sexy'}]
```
Here is how to use this model to get the features of a given text in PyTorch:
```python
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-large')
model = AutoModelForMaskedLM.from_pretrained("xlm-roberta-large")
# prepare input
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
# forward pass
output = model(**encoded_input)
```
### BibTeX entry and citation info
```bibtex
@article{DBLP:journals/corr/abs-1911-02116,
author = {Alexis Conneau and
Kartikay Khandelwal and
Naman Goyal and
Vishrav Chaudhary and
Guillaume Wenzek and
Francisco Guzm{\'{a}}n and
Edouard Grave and
Myle Ott and
Luke Zettlemoyer and
Veselin Stoyanov},
title = {Unsupervised Cross-lingual Representation Learning at Scale},
journal = {CoRR},
volume = {abs/1911.02116},
year = {2019},
url = {http://arxiv.org/abs/1911.02116},
eprinttype = {arXiv},
eprint = {1911.02116},
timestamp = {Mon, 11 Nov 2019 18:38:09 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1911-02116.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<a href="https://huggingface.co/exbert/?model=xlm-roberta-base">
<img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>

xlm-roberta-large/config.json (new file, +25)
@@ -0,0 +1,25 @@
{
"architectures": [
"XLMRobertaForMaskedLM"
],
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"layer_norm_eps": 1e-05,
"max_position_embeddings": 514,
"model_type": "xlm-roberta",
"num_attention_heads": 16,
"num_hidden_layers": 24,
"output_past": true,
"pad_token_id": 1,
"position_embedding_type": "absolute",
"transformers_version": "4.17.0.dev0",
"type_vocab_size": 1,
"use_cache": true,
"vocab_size": 250002
}

xlm-roberta-large/configuration.json (new file, +1)
@@ -0,0 +1 @@
{"framework": "pytorch", "task": "fill-mask", "allow_remote": true}

xlm-roberta-large/flax_model.msgpack (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:96d19a73ca044be7c23518d2d23154eb0a1e6fb301d3b086e2d80bdfff1391ce
size 2240584013

xlm-roberta-large/model.safetensors (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2dfa19f172412917cab174da04b46e2134811b723666965fd0aabd97caa6e23b
size 2244817354

xlm-roberta-large/onnx/config.json (new file, +27)
@@ -0,0 +1,27 @@
{
"_name_or_path": "xlm-roberta-large",
"architectures": [
"XLMRobertaForMaskedLM"
],
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 0,
"classifier_dropout": null,
"eos_token_id": 2,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"layer_norm_eps": 1e-05,
"max_position_embeddings": 514,
"model_type": "xlm-roberta",
"num_attention_heads": 16,
"num_hidden_layers": 24,
"output_past": true,
"pad_token_id": 1,
"position_embedding_type": "absolute",
"transformers_version": "4.30.2",
"type_vocab_size": 1,
"use_cache": true,
"vocab_size": 250002
}

xlm-roberta-large/onnx/model.onnx (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb5a52503a3ef35247f5b5ae6c473aaae60505dd3ffaef56d7b69e2f84683c05
size 545850

xlm-roberta-large/onnx/model.onnx_data (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1798dab29db9d3fe4193ffa091512730369bd1c1c2041de430e09394d4f57df1
size 2235363328
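
`model.onnx` is only ~0.5 MB because the weights live in the external-data file `model.onnx_data` alongside it; onnxruntime loads both as long as they share a directory. A hedged inference sketch (the input names `input_ids`/`attention_mask` are assumed from a standard transformers ONNX export):

```python
# Hedged sketch: MLM logits from the ONNX export above. Assumes both
# model.onnx and model.onnx_data sit in ./xlm-roberta-large/onnx/.
import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./xlm-roberta-large")
sess = ort.InferenceSession("./xlm-roberta-large/onnx/model.onnx")

enc = tok("Hello I'm a <mask> model.", return_tensors="np")
logits = sess.run(None, {
    "input_ids": enc["input_ids"].astype(np.int64),
    "attention_mask": enc["attention_mask"].astype(np.int64),
})[0]
print(logits.shape)  # (batch, seq_len, 250002) vocab-sized MLM logits
```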

xlm-roberta-large/onnx/sentencepiece.bpe.model (binary, new file, stored with Git LFS)

xlm-roberta-large/onnx/special_tokens_map.json (new file, +15)
@@ -0,0 +1,15 @@
{
"bos_token": "<s>",
"cls_token": "<s>",
"eos_token": "</s>",
"mask_token": {
"content": "<mask>",
"lstrip": true,
"normalized": false,
"rstrip": false,
"single_word": false
},
"pad_token": "<pad>",
"sep_token": "</s>",
"unk_token": "<unk>"
}

xlm-roberta-large/onnx/tokenizer.json (binary, new file, stored with Git LFS)

xlm-roberta-large/onnx/tokenizer_config.json (new file, +19)
@@ -0,0 +1,19 @@
{
"bos_token": "<s>",
"clean_up_tokenization_spaces": true,
"cls_token": "<s>",
"eos_token": "</s>",
"mask_token": {
"__type": "AddedToken",
"content": "<mask>",
"lstrip": true,
"normalized": true,
"rstrip": false,
"single_word": false
},
"model_max_length": 512,
"pad_token": "<pad>",
"sep_token": "</s>",
"tokenizer_class": "XLMRobertaTokenizer",
"unk_token": "<unk>"
}

xlm-roberta-large/pytorch_model.bin (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:01e55aa45dbb9164fee19aef60007a1c91d175051c01be1fb15056cfa60f3e53
size 2244861551

xlm-roberta-large/sentencepiece.bpe.model (binary, new file, stored with Git LFS)

xlm-roberta-large/tf_model.h5 (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a465c8d459fe83e10db5655221e2e7e7b6df3de2216c524399358d17ac7315ea
size 2240076248

xlm-roberta-large/tokenizer.json (new file, +3)
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a898ea75433890f6610f4e470b8ebeb0c21dce5c8dd61f892eb09eb5919d2e2c
size 9096718