Add files using upload-large-folder tool
- .gitignore +1 -0
- LICENSE +21 -0
- README.md +151 -3
- config.json +51 -0
- configuration_longcat_ngram.py +216 -0
- generation_config.json +7 -0
- model-00000-of-00038.safetensors +3 -0
- model-00001-of-00038.safetensors +3 -0
- model-00002-of-00038.safetensors +3 -0
- model-00003-of-00038.safetensors +3 -0
- model-00004-of-00038.safetensors +3 -0
- model-00005-of-00038.safetensors +3 -0
- model-00006-of-00038.safetensors +3 -0
- model-00007-of-00038.safetensors +3 -0
- model-00008-of-00038.safetensors +3 -0
- model-00009-of-00038.safetensors +3 -0
- model-00010-of-00038.safetensors +3 -0
- model-00011-of-00038.safetensors +3 -0
- model-00012-of-00038.safetensors +3 -0
- model-00013-of-00038.safetensors +3 -0
- model-00014-of-00038.safetensors +3 -0
- model-00015-of-00038.safetensors +3 -0
- model-00016-of-00038.safetensors +3 -0
- model-00017-of-00038.safetensors +3 -0
- model-00018-of-00038.safetensors +3 -0
- model-00019-of-00038.safetensors +3 -0
- model-00020-of-00038.safetensors +3 -0
- model-00021-of-00038.safetensors +3 -0
- model-00022-of-00038.safetensors +3 -0
- model-00023-of-00038.safetensors +3 -0
- model-00024-of-00038.safetensors +3 -0
- model-00025-of-00038.safetensors +3 -0
- model-00026-of-00038.safetensors +3 -0
- model-00027-of-00038.safetensors +3 -0
- model-00028-of-00038.safetensors +3 -0
- model-00029-of-00038.safetensors +3 -0
- model-00030-of-00038.safetensors +3 -0
- model-00031-of-00038.safetensors +3 -0
- model-00032-of-00038.safetensors +3 -0
- model-00033-of-00038.safetensors +3 -0
- model-00034-of-00038.safetensors +3 -0
- model-00035-of-00038.safetensors +3 -0
- model-00036-of-00038.safetensors +3 -0
- model-00037-of-00038.safetensors +3 -0
- model.safetensors.index.json +0 -0
- model_hashes.txt +38 -0
- modeling_longcat_ngram.py +338 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer_config.json +42 -0
.gitignore
ADDED
@@ -0,0 +1 @@
*.safetensors

LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 Meituan

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md
CHANGED
@@ -1,3 +1,151 @@
- ---
- license: mit
-
---
license: mit
library_name: LongCat-Flash-Lite
pipeline_tag: text-generation
tags:
- transformers
---

# LongCat-Flash-Lite

<div align="center">
  <img src="https://raw.githubusercontent.com/meituan-longcat/LongCat-Flash-Chat/main/figures/longcat_logo.svg"
       width="300"
       alt="LongCat Logo"/>
</div>

<hr>

<div align="center" style="line-height: 1;">
  <a href="https://longcat.ai/" target="_blank" style="margin: 2px;">
    <img alt="Chat" src="https://img.shields.io/badge/🤖%20Chat-LongCat--Flash--Chat-ADFF2F?color=29E154&logoColor=white" fill-opacity="1" style="display: inline-block; vertical-align: middle;"/>
  </a>
  <a href="https://huggingface.co/meituan-longcat" target="_blank" style="margin: 2px;">
    <img alt="Hugging Face" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-LongCat-ffc107?color=ffc107&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
  </a>
</div>

<div align="center" style="line-height: 1;">
  <a href="https://github.com/meituan-longcat/LongCat-Flash-Chat/blob/main/figures/wechat_official_accounts.png" target="_blank" style="margin: 2px;">
    <img alt="Wechat" src="https://img.shields.io/badge/WeChat-LongCat-brightgreen?logo=wechat&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
  </a>
  <a href="https://x.com/Meituan_LongCat" target="_blank" style="margin: 2px;">
    <img alt="Twitter Follow" src="https://img.shields.io/badge/Twitter-LongCat-white?logo=x&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
  </a>
</div>

<div align="center" style="line-height: 1;">
  <a href="https://huggingface.co/meituan-longcat/LongCat-Flash-Chat/blob/main/LICENSE" style="margin: 2px;">
    <img alt="License" src="https://img.shields.io/badge/License-MIT-f5de53?&color=f5de53" style="display: inline-block; vertical-align: middle;"/>
  </a>
</div>

<p align="center">
  <a href="https://xx.xx.xx"><b>Tech Report</b> 📄</a>
</p>

## Model Introduction
We introduce LongCat-Flash-Lite, a 68.5B-parameter model with ∼3B activated parameters, trained from scratch. Despite allocating over 30B parameters to embeddings, LongCat-Flash-Lite not only surpasses parameter-equivalent MoE baselines but also remains highly competitive with existing models of comparable scale, particularly in agentic and coding domains.

### Key Features

#### 🌟 Superior Scaling Efficiency: A Better Alternative to MoE
Through comprehensive scaling experiments across diverse scenarios, we identify specific regimes where embedding scaling achieves a superior Pareto frontier compared to increasing the number of experts, offering a high-efficiency alternative for model scaling.

#### 🌟 Optimized Architecture with N-gram Embeddings
We establish the complete set of architectural factors that determine embedding scaling efficacy, covering integration timing, parameter budgeting, hash collisions, hyperparameter settings, and embedding initialization, together with the effects of model width and depth. In addition, we investigate different methods of scaling embeddings and find that N-gram Embedding offers the most robust scalability.

#### 🌟 Inference Efficiency and System Optimization
We demonstrate that N-gram Embedding largely reduces I/O bottlenecks in MoE layers, particularly when paired with speculative decoding to maximize hardware utilization. To address the accompanying embedding overhead, we propose a specialized N-gram Cache and synchronized kernels, ensuring that the reduction in active parameters translates directly into lower latency and higher throughput.

For more details, please refer to [***Scaling Embeddings Outperforms Scaling Experts in Language Models***](https://xx.xx.xx).

## Evaluation Results
Values marked with * are sourced from public reports.

| Benchmark | Qwen3-Next-80B-A3B-Instruct | Qwen3-30B-A3B-Instruct-2507 | Gemini 2.5 Flash-Lite Preview (09-2025) | Kimi-Linear-48B-A3B | LongCat |
|---------|-----------------------------|----------------------------|----------------------------------------|---------------------|---------|
| **Architecture** | MoE | MoE | - | MoE | MoE + NE |
| **# Total Params** | 80B | 30B | - | 48B | 68.5B |
| **# Activated Params** | 3B | 3B | - | 3B | 2.9B∼4.5B |

### Agentic Tool Use

| Benchmark | Qwen3-Next-80B-A3B-Instruct | Qwen3-30B-A3B-Instruct-2507 | Gemini 2.5 Flash-Lite Preview (09-2025) | Kimi-Linear-48B-A3B | LongCat |
|---------|-----------------------------|----------------------------|----------------------------------------|---------------------|---------|
| Tau2-Airline(avg@8) | 45.5* | 38.0* | 35.00 | 44.00 | **58.00** |
| Tau2-Retail(avg@8) | 57.3* | 57.0* | 37.50 | 18.86 | **73.10** |
| Tau2-Telecom(avg@8) | 13.2* | 12.3* | 41.0* | 15.68 | **72.80** |
| VitaBench(avg@4) | 5.80 | 3.50 | 4.50 | - | **7.00** |

### Agentic Coding

| Benchmark | Qwen3-Next-80B-A3B-Instruct | Qwen3-30B-A3B-Instruct-2507 | Gemini 2.5 Flash-Lite Preview (09-2025) | Kimi-Linear-48B-A3B | LongCat |
|---------|-----------------------------|----------------------------|----------------------------------------|---------------------|---------|
| SWE-Bench(acc) | 37.60 | 6.80 | 41.3* | 32.80 | **54.40** |
| TerminalBench(acc) | 15.19 | 17.72 | 20.00 | 20.00 | **33.75** |
| SWE-Bench Multilingual | 31.30 | 30.60 | - | 37.20 | |

### General Domains

| Benchmark | Qwen3-Next-80B-A3B-Instruct | Qwen3-30B-A3B-Instruct-2507 | Gemini 2.5 Flash-Lite Preview (09-2025) | Kimi-Linear-48B-A3B | LongCat |
|---------|-----------------------------|----------------------------|----------------------------------------|---------------------|---------|
| GPQA-Diamond(avg@16) | **74.33** | 66.86 | 70.20* | 69.89 | 66.78 |
| MMLU(acc) | **89.28** | 86.92 | 84.68 | 79.91 | 85.52 |
| MMLU-Pro(acc) | **82.93** | 80.28 | 78.95 | 67.22 | 78.29 |
| CEval(acc) | **90.91** | 90.05 | 75.16 | 78.48 | 86.55 |
| CMMLU(acc) | **86.50** | 84.98 | 72.06 | 76.26 | 82.48 |

### Mathematical Reasoning

| Benchmark | Qwen3-Next-80B-A3B-Instruct | Qwen3-30B-A3B-Instruct-2507 | Gemini 2.5 Flash-Lite Preview (09-2025) | Kimi-Linear-48B-A3B | LongCat |
|---------|-----------------------------|----------------------------|----------------------------------------|---------------------|---------|
| MATH500(acc) | **98.00** | 97.00 | 95.20 | 94.20 | 96.80 |
| AIME24(avg@32) | **81.35** | 71.35 | 63.33 | 70.52 | 72.19 |
| AIME25(avg@32) | **68.44** | 62.08 | 50.1* | 59.58 | 63.23 |

## Quick Start
```shell

```

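Pending an official snippet, a minimal sketch of loading the model through the `auto_map` remote-code entry points declared in `config.json` might look like the following (the hub repo id `meituan-longcat/LongCat-Flash-Lite` is assumed from the model name, not confirmed by this repository):

```python
# Minimal sketch, not an official example. Assumes the hub repo id below.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meituan-longcat/LongCat-Flash-Lite"  # assumed repo id

# trust_remote_code=True is required: config.json maps AutoModelForCausalLM to
# modeling_longcat_ngram.LongcatFlashNgramForCausalLM shipped in this repo.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
    device_map="auto",
    trust_remote_code=True,
)

inputs = tokenizer("Hello, LongCat!", return_tensors="pt").to(model.device)
# generate() is overridden in modeling_longcat_ngram.py to install an NgramCache,
# so no special cache handling is needed on the caller's side.
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
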
## License Agreement

This repository, including both the model weights and the source code, is released under the **MIT License**.

Any contributions to this repository are licensed under the MIT License, unless otherwise stated. This license does not grant any rights to use Meituan trademarks or patents.

For details, see the [LICENSE](./LICENSE) file.

## Usage Considerations
This model has not been specifically designed or comprehensively evaluated for every possible downstream application.

Developers should take into account the known limitations of large language models, including performance variations across different languages, and carefully assess accuracy, safety, and fairness before deploying the model in sensitive or high-risk scenarios.
It is the responsibility of developers and downstream users to understand and comply with all applicable laws and regulations relevant to their use case, including but not limited to data protection, privacy, and content safety requirements.

Nothing in this Model Card should be interpreted as altering or restricting the terms of the MIT License under which the model is released.

## Citation

We kindly encourage citation of our work if you find it useful.

```

```

## Contact
Please contact us at <a href="mailto:longcat-team@meituan.com">longcat-team@meituan.com</a> or open an issue if you have any questions.

config.json
ADDED
@@ -0,0 +1,51 @@
{
  "architectures": [
    "LongcatFlashNgramForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_longcat_ngram.LongcatFlashNgramConfig",
    "AutoModel": "modeling_longcat_ngram.LongcatFlashNgramModel",
    "AutoModelForCausalLM": "modeling_longcat_ngram.LongcatFlashNgramForCausalLM"
  },
  "vocab_size": 131072,
  "hidden_size": 3072,
  "ffn_hidden_size": 6144,
  "expert_ffn_hidden_size": 1024,
  "num_layers": 14,
  "num_attention_heads": 32,
  "kv_lora_rank": 512,
  "q_lora_rank": 1536,
  "qk_rope_head_dim": 64,
  "v_head_dim": 128,
  "qk_nope_head_dim": 128,
  "mla_scale_q_lora": true,
  "mla_scale_kv_lora": true,
  "routed_scaling_factor": 6.0,
  "n_routed_experts": 256,
  "rms_norm_eps": 1e-5,
  "use_cache": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "rope_theta": 5000000.0,
  "max_position_embeddings": 327680,
  "rope_scaling": {
    "original_max_position_embeddings": 32768,
    "rope_type": "yarn",
    "factor": 10,
    "beta_fast": 32,
    "beta_slow": 1,
    "mscale": 1,
    "mscale_all_dim": 1
  },
  "zero_expert_num": 128,
  "zero_expert_type": "identity",
  "moe_topk": 12,
  "ngram_vocab_size_ratio": 78,
  "emb_neighbor_num": 4,
  "emb_split_num": 4,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.57.6"
}

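The embedding-related fields above pin down the README's claim of "over 30B parameters to embeddings": `emb_split_num * (emb_neighbor_num - 1) = 12` hash tables, each with roughly `ngram_vocab_size_ratio * vocab_size ≈ 10.2M` rows of width `hidden_size / 12 = 256`. A back-of-the-envelope sketch (the per-table `+ i * 2 + 1` offset mirrors `_init_ngram_embeddings` in modeling_longcat_ngram.py; counts are approximate):

```python
# Approximate parameter count of the N-gram embedding stack from config.json.
vocab_size, hidden_size = 131072, 3072
ratio, k, n = 78, 4, 4                     # ngram_vocab_size_ratio, emb_split_num, emb_neighbor_num

num_tables = k * (n - 1)                   # 12 hash tables (2/3/4-gram x 4 splits)
emb_dim = hidden_size // num_tables        # 256

total = vocab_size * hidden_size           # base token embedding, ~0.4B
for i in range(num_tables):
    rows = ratio * vocab_size + i * 2 + 1  # per-table vocab; odd offsets decorrelate the hashes
    total += rows * emb_dim                # hash-table weights
    total += emb_dim * hidden_size         # projection back to hidden_size
print(f"~{total / 1e9:.1f}B")              # ~31.8B embedding parameters
```
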
configuration_longcat_ngram.py
ADDED
@@ -0,0 +1,216 @@
from transformers.models.longcat_flash import LongcatFlashConfig


class LongcatFlashNgramConfig(LongcatFlashConfig):
    r"""
    This is the configuration class to store the configuration of a [`LongcatFlashNgramModel`]. It is used to instantiate
    a LongCat Flash model with N-gram enhanced embeddings according to the specified arguments, defining the model architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 131072):
            Vocabulary size of the LongCat Flash model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`LongcatFlashNgramModel`]
        hidden_size (`int`, *optional*, defaults to 6144):
            Dimension of the hidden representations.
        num_hidden_layers (`int`, *optional*, defaults to 56):
            Number of hidden layers in the Transformer decoder.
        num_layers (`int`, *optional*, defaults to 28):
            Number of layers, each with 2 sublayers.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting from a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie input and output embeddings.
        rope_theta (`float`, *optional*, defaults to 10000000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        ffn_hidden_size (`int`, *optional*, defaults to 12288):
            Dimension of the MLP representations.
        q_lora_rank (`int`, *optional*, defaults to 1536):
            The rank of the query LoRA projection in MLA (Multi-head Latent Attention).
        kv_lora_rank (`int`, *optional*, defaults to 512):
            The rank of the key-value LoRA projection in MLA.
        qk_nope_head_dim (`int`, *optional*, defaults to 128):
            The dimension of the non-position encoding part of query/key heads.
        qk_rope_head_dim (`int`, *optional*, defaults to 64):
            The dimension of the RoPE part of query/key heads.
        head_dim (`int`, *optional*, defaults to 64):
            Standard dimension of qk heads, unused except for CI.
        v_head_dim (`int`, *optional*, defaults to 128):
            The dimension of value heads.
        qk_head_dim (`int`, *optional*):
            The total dimension of query/key heads. If not specified, set to `qk_nope_head_dim + qk_rope_head_dim`.
        moe_topk (`int`, *optional*, defaults to 12):
            Number of experts to route to for each token in the MoE layer.
        n_routed_experts (`int`, *optional*, defaults to 512):
            Number of routed experts in the MoE layer.
        zero_expert_num (`int`, *optional*, defaults to 256):
            Number of zero experts (identity function) to add to the expert pool.
        expert_ffn_hidden_size (`int`, *optional*, defaults to 2048):
            Hidden size of individual expert FFN layers.
        routed_scaling_factor (`float`, *optional*, defaults to 6.0):
            Scaling factor applied to the routing weights.
        emb_neighbor_num (`int`, *optional*):
            Maximum N-gram length for N-gram embeddings. This parameter determines the context window size for N-gram computation. Higher values capture
            longer-range lexical patterns but increase memory usage.
        emb_split_num (`int`, *optional*):
            Number of hash functions (or splits) to use for N-gram embeddings. Multiple hash functions help improve the quality of N-gram representations.
        ngram_vocab_size_ratio (`float`, *optional*):
            Ratio multiplier for N-gram vocabulary size relative to the base vocabulary size. The N-gram vocabulary
            size is calculated as `vocab_size * ngram_vocab_size_ratio`.

    Example:
    ```python
    >>> from transformers import LongcatFlashNgramModel, LongcatFlashNgramConfig

    >>> # Initializing a LongCat Flash N-gram style configuration
    >>> configuration = LongcatFlashNgramConfig(
    ...     emb_neighbor_num=3,
    ...     emb_split_num=4,
    ...     ngram_vocab_size_ratio=1.5
    ... )

    >>> # Initializing a model from the configuration
    >>> model = LongcatFlashNgramModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "longcat_flash_ngram"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.*.q_b_proj": "colwise",
        "layers.*.self_attn.*.kv_b_proj": "colwise",
        "layers.*.self_attn.*.o_proj": "rowwise",
        "layers.*.mlps.*.gate_proj": "colwise",
        "layers.*.mlps.*.up_proj": "colwise",
        "layers.*.mlps.*.down_proj": "rowwise",
        "layers.*.mlp.experts.*.gate_proj": "colwise",
        "layers.*.mlp.experts.*.up_proj": "colwise",
        "layers.*.mlp.experts.*.down_proj": "rowwise",
    }

    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=131072,
        hidden_size=6144,
        num_hidden_layers=56,
        num_layers=28,
        num_attention_heads=64,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=131072,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=10000000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        ffn_hidden_size=12288,
        q_lora_rank=1536,
        kv_lora_rank=512,
        qk_nope_head_dim=128,
        qk_rope_head_dim=64,
        head_dim=64,
        v_head_dim=128,
        qk_head_dim=None,
        moe_topk=12,
        n_routed_experts=512,
        zero_expert_num=256,
        expert_ffn_hidden_size=2048,
        routed_scaling_factor=6.0,
        emb_neighbor_num=None,
        emb_split_num=None,
        ngram_vocab_size_ratio=None,
        **kwargs,
    ):
        # N-gram embedding specific parameters
        self.emb_neighbor_num = emb_neighbor_num
        self.emb_split_num = emb_split_num
        self.ngram_vocab_size_ratio = ngram_vocab_size_ratio

        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_layers=num_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            rms_norm_eps=rms_norm_eps,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            ffn_hidden_size=ffn_hidden_size,
            q_lora_rank=q_lora_rank,
            kv_lora_rank=kv_lora_rank,
            qk_nope_head_dim=qk_nope_head_dim,
            qk_rope_head_dim=qk_rope_head_dim,
            head_dim=head_dim,
            v_head_dim=v_head_dim,
            qk_head_dim=qk_head_dim,
            moe_topk=moe_topk,
            n_routed_experts=n_routed_experts,
            zero_expert_num=zero_expert_num,
            expert_ffn_hidden_size=expert_ffn_hidden_size,
            routed_scaling_factor=routed_scaling_factor,
            **kwargs,
        )


__all__ = ["LongcatFlashNgramConfig"]

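Note that the docstring example above imports the classes from `transformers`, whereas in this repository they are shipped as remote code and resolved through the `auto_map` in `config.json`. A sketch of that loading path (the hub repo id is assumed):

```python
# Sketch: resolving this repo's config via the remote-code path.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "meituan-longcat/LongCat-Flash-Lite",  # assumed repo id
    trust_remote_code=True,                # needed to execute configuration_longcat_ngram.py
)
print(config.emb_neighbor_num, config.emb_split_num, config.ngram_vocab_size_ratio)
# -> 4 4 78, the values set in config.json
```
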
generation_config.json
ADDED
@@ -0,0 +1,7 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "pad_token_id": 3,
  "transformers_version": "4.55.0"
}

model-00000-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:58fa5258626248582925148ffaec79f752045d76b3073eff9035f78244174e27
size 5289947960

model-00001-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4862c4cc80ac36701781ed02a84822a152e7a285b57cf1260f8fd23209d99cce
size 5289947968

model-00002-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:244ba1cc3f7da33f1b3bfce6eec2a38c1b1410eb1aea22c7796bd0e2bec53dd5
size 5289946944

model-00003-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:73aebcb63519a519befe68cb1c26b05bcc310ee14dfac44ca49b1f18d82c69bf
size 5289946936

model-00004-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ed6f0578b36b47680355b736a48ab732380b77d29d29f8c0e18386397523ca59
size 5289945920

model-00005-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:411ac9baf753252971f9b7072ac08c950ff66c2acd410207a5c48480d898f532
size 5289945912

model-00006-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:367a91bc240583a8a701b41ded917297cebd04113cdd13c3b1703be396a1fdab
size 5289944880

model-00007-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1a66b7d59d3bb94d1874dfa7fc7b91afa2632af4d2dd19b14ee503596b38f4e2
size 5289944888

model-00008-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e10c54ec49ab87bb7abf1b9df07b2767a37a26ddf1c62014d464144c01ff8ea9
size 5289943856

model-00009-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:99d1e0f997b7f6e4cc7c2245649f0facbcef377bf88f883a31f2faa557cd8ac9
size 5289943880

model-00010-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f964d48cf6d02f34fc4ba242bbe1a11d83c4df5fa660695f9fee53b53b84eab
size 5289942840

model-00011-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:15b59cf9ecdb92b277d5b7a10f25f35db7373daa5050d55a65f8be2a1f283624
size 5289942848

model-00012-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:70511491f956b62c2418781e3d9db56df42510e807735ec6d8f395ccb3340e82
size 5289941824

model-00013-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e614af5d55350bda182d21e2e39a69c4436f416f301bc536a8280d6e5fbbc465
size 5289941816

model-00014-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ecba8f4358c5fa7a84c4e2a38b32e1c579e0601d661822288cd4ac629b58f71
size 5289940800

model-00015-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c549541ab83b5013190c191de4db8ff7dcabc5fa78cb68796970dabda4a14636
size 5289940792

model-00016-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3647fdc2158e2201da25a6e6b93ebc77d52e085f28995920c94346d79f5da845
size 5289939768

model-00017-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:978483a120263813f2f0d1f0a4f741390202d0364e5c449b79e8f0fc05819a1c
size 5289939776

model-00018-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9372820581b918bfa6866b6628013f331d91946846c65062374750cd5ca16fe9
size 5289938744

model-00019-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4c5828059c2f5e3fb067e07232856190a018863fae38a1e1858fc2dd1fb86ca1
size 5289938752

model-00020-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94f4f1b53f863bf2e4fb7f817bfd722c4e3f712180a47b4c173efe86f9f708ff
size 5289544376

model-00021-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae5f05ffeeeceaa0d09a4703340fd7e7834d87d23d8a7b5f928a3a767a4d29cf
size 5289937728

model-00022-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ebae9b3f3ff94b0b36b352ec72a0cc228fbe611dc59b3026a44d932961654791
size 5289543352

model-00023-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a8fee6ecd8a561d353cd135a681252f923dc247d27c624d083b45c21354d4080
size 5289543352

model-00024-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ad229f37d38cd12675367219d16f803f2a07d5fb71bc84c9d9c62598692a4100
size 5289630936

model-00025-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:139cc01fc2445267a046da2939d589367d17cc146be8259f69bfabc9e23eca63
size 5289630912

model-00026-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:239b6c3f708e901b273711c71f7b85619d462dcca9130e3506d6151f75b5387c
size 5289630648

model-00027-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:982852a74d080f1567e6a02aee0b60ef56bf37f5073cf2ae904bd36561f5f395
size 5289639576

model-00028-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3c91728af1195b4b7172a2190606fd3cfb53604aef9f0a27f951af509824e740
size 5290032856

model-00029-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0cb681b76b8cf18fb5a8ae136923bcb1ff79b2d706b8e73c8677decad7ff5648
size 5290032792

model-00030-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7298c81efffeee772390b48e182e7d033be69c1d5df86c24a54d4f68f93b726a
size 5290032792

model-00031-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2151fa0ae1c16356c7606f7fdd0778aa49f2727f5341b7daa19ac3b8afbf6e0e
size 5290032848

model-00032-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:590f6a7b7577a82bd7edf6434462316190aea55ccf2529053bd3465d0c232e52
size 5289508552

model-00033-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f95d2a591d0ca4cb93db00a13c691f006ac8aaf0a4aeb09fc84447f0176aab10
size 5289508512

model-00034-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1ccc5afa878158ddab54d0e8381e4e91ecc524ebd2f99aa28cdb8e71fc6ed155
size 5289384400

model-00035-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:022ef6dfbf2f526e0ef9a66d636ab246675815f9877f6654f45d6cda522ec9e9
size 5289383248

model-00036-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d14972288a2e73168f1fde0536b53bd9cd4e83daa3902cd8679fce38b8f677e
size 5290032792

model-00037-of-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0c9d09758a0617c2b3ecc401f55141537114cf484b549a161628751ee9b699fa
size 5290032920

model.safetensors.index.json
ADDED
(Diff too large to render.)

model_hashes.txt
ADDED
@@ -0,0 +1,38 @@
022ef6dfbf2f526e0ef9a66d636ab246675815f9877f6654f45d6cda522ec9e9 ./model-00035-of-00038.safetensors
0c9d09758a0617c2b3ecc401f55141537114cf484b549a161628751ee9b699fa ./model-00037-of-00038.safetensors
0cb681b76b8cf18fb5a8ae136923bcb1ff79b2d706b8e73c8677decad7ff5648 ./model-00029-of-00038.safetensors
139cc01fc2445267a046da2939d589367d17cc146be8259f69bfabc9e23eca63 ./model-00025-of-00038.safetensors
15b59cf9ecdb92b277d5b7a10f25f35db7373daa5050d55a65f8be2a1f283624 ./model-00011-of-00038.safetensors
1a66b7d59d3bb94d1874dfa7fc7b91afa2632af4d2dd19b14ee503596b38f4e2 ./model-00007-of-00038.safetensors
1ccc5afa878158ddab54d0e8381e4e91ecc524ebd2f99aa28cdb8e71fc6ed155 ./model-00034-of-00038.safetensors
2151fa0ae1c16356c7606f7fdd0778aa49f2727f5341b7daa19ac3b8afbf6e0e ./model-00031-of-00038.safetensors
239b6c3f708e901b273711c71f7b85619d462dcca9130e3506d6151f75b5387c ./model-00026-of-00038.safetensors
244ba1cc3f7da33f1b3bfce6eec2a38c1b1410eb1aea22c7796bd0e2bec53dd5 ./model-00002-of-00038.safetensors
3647fdc2158e2201da25a6e6b93ebc77d52e085f28995920c94346d79f5da845 ./model-00016-of-00038.safetensors
367a91bc240583a8a701b41ded917297cebd04113cdd13c3b1703be396a1fdab ./model-00006-of-00038.safetensors
3c91728af1195b4b7172a2190606fd3cfb53604aef9f0a27f951af509824e740 ./model-00028-of-00038.safetensors
3ecba8f4358c5fa7a84c4e2a38b32e1c579e0601d661822288cd4ac629b58f71 ./model-00014-of-00038.safetensors
411ac9baf753252971f9b7072ac08c950ff66c2acd410207a5c48480d898f532 ./model-00005-of-00038.safetensors
4862c4cc80ac36701781ed02a84822a152e7a285b57cf1260f8fd23209d99cce ./model-00001-of-00038.safetensors
4c5828059c2f5e3fb067e07232856190a018863fae38a1e1858fc2dd1fb86ca1 ./model-00019-of-00038.safetensors
58fa5258626248582925148ffaec79f752045d76b3073eff9035f78244174e27 ./model-00000-of-00038.safetensors
590f6a7b7577a82bd7edf6434462316190aea55ccf2529053bd3465d0c232e52 ./model-00032-of-00038.safetensors
5f964d48cf6d02f34fc4ba242bbe1a11d83c4df5fa660695f9fee53b53b84eab ./model-00010-of-00038.safetensors
70511491f956b62c2418781e3d9db56df42510e807735ec6d8f395ccb3340e82 ./model-00012-of-00038.safetensors
7298c81efffeee772390b48e182e7d033be69c1d5df86c24a54d4f68f93b726a ./model-00030-of-00038.safetensors
73aebcb63519a519befe68cb1c26b05bcc310ee14dfac44ca49b1f18d82c69bf ./model-00003-of-00038.safetensors
7d14972288a2e73168f1fde0536b53bd9cd4e83daa3902cd8679fce38b8f677e ./model-00036-of-00038.safetensors
9372820581b918bfa6866b6628013f331d91946846c65062374750cd5ca16fe9 ./model-00018-of-00038.safetensors
94f4f1b53f863bf2e4fb7f817bfd722c4e3f712180a47b4c173efe86f9f708ff ./model-00020-of-00038.safetensors
978483a120263813f2f0d1f0a4f741390202d0364e5c449b79e8f0fc05819a1c ./model-00017-of-00038.safetensors
982852a74d080f1567e6a02aee0b60ef56bf37f5073cf2ae904bd36561f5f395 ./model-00027-of-00038.safetensors
99d1e0f997b7f6e4cc7c2245649f0facbcef377bf88f883a31f2faa557cd8ac9 ./model-00009-of-00038.safetensors
a8fee6ecd8a561d353cd135a681252f923dc247d27c624d083b45c21354d4080 ./model-00023-of-00038.safetensors
ad229f37d38cd12675367219d16f803f2a07d5fb71bc84c9d9c62598692a4100 ./model-00024-of-00038.safetensors
ae5f05ffeeeceaa0d09a4703340fd7e7834d87d23d8a7b5f928a3a767a4d29cf ./model-00021-of-00038.safetensors
c549541ab83b5013190c191de4db8ff7dcabc5fa78cb68796970dabda4a14636 ./model-00015-of-00038.safetensors
e10c54ec49ab87bb7abf1b9df07b2767a37a26ddf1c62014d464144c01ff8ea9 ./model-00008-of-00038.safetensors
e614af5d55350bda182d21e2e39a69c4436f416f301bc536a8280d6e5fbbc465 ./model-00013-of-00038.safetensors
ebae9b3f3ff94b0b36b352ec72a0cc228fbe611dc59b3026a44d932961654791 ./model-00022-of-00038.safetensors
ed6f0578b36b47680355b736a48ab732380b77d29d29f8c0e18386397523ca59 ./model-00004-of-00038.safetensors
f95d2a591d0ca4cb93db00a13c691f006ac8aaf0a4aeb09fc84447f0176aab10 ./model-00033-of-00038.safetensors

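The file follows the `sha256sum` "hash path" format, so the shards can be verified with `sha256sum -c model_hashes.txt` or, portably, with a short Python loop (a sketch, run from the repository root):

```python
# Verify downloaded shards against model_hashes.txt.
import hashlib

with open("model_hashes.txt") as listing:
    for line in listing:
        expected, path = line.split()
        digest = hashlib.sha256()
        with open(path, "rb") as shard:
            for chunk in iter(lambda: shard.read(1 << 20), b""):  # 1 MiB chunks
                digest.update(chunk)
        status = "OK" if digest.hexdigest() == expected else "MISMATCH"
        print(f"{path}: {status}")
```
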
modeling_longcat_ngram.py
ADDED
|
@@ -0,0 +1,338 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Copyright (c) 2025 Meituan
|
| 3 |
+
# This code is licensed under the MIT License, for details, see the ./LICENSE file.
|
| 4 |
+
|
| 5 |
+
from typing import Optional, Tuple, Dict, List
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch import nn
|
| 9 |
+
|
| 10 |
+
from transformers.cache_utils import Cache, DynamicCache
|
| 11 |
+
from transformers.masking_utils import create_causal_mask
|
| 12 |
+
from transformers.modeling_outputs import BaseModelOutputWithPast
|
| 13 |
+
from transformers.processing_utils import Unpack
|
| 14 |
+
from transformers.utils import auto_docstring, logging
|
| 15 |
+
from transformers.models.longcat_flash.modeling_longcat_flash import (
|
| 16 |
+
LongcatFlashForCausalLM,
|
| 17 |
+
LongcatFlashModel,
|
| 18 |
+
LongcatFlashRMSNorm,
|
| 19 |
+
LongcatFlashRotaryEmbedding,
|
| 20 |
+
LongcatFlashDecoderLayer,
|
| 21 |
+
LongcatFlashPreTrainedModel,
|
| 22 |
+
)
|
| 23 |
+
from .configuration_longcat_ngram import LongcatFlashNgramConfig
|
| 24 |
+
|
| 25 |
+
logger = logging.get_logger(__name__)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@auto_docstring
|
| 29 |
+
class LongcatFlashNgramPreTrainedModel(LongcatFlashPreTrainedModel):
|
| 30 |
+
pass
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class NgramCache(DynamicCache):
|
| 34 |
+
"""
|
| 35 |
+
Extended DynamicCache for storing N-gram context alongside KV cache.
|
| 36 |
+
"""
|
| 37 |
+
def __init__(self, config=None):
|
| 38 |
+
super().__init__()
|
| 39 |
+
self.ngram_context = None
|
| 40 |
+
# Keep only n-1 tokens (minimum needed for N-gram computation)
|
| 41 |
+
self.max_context_len = config.emb_neighbor_num - 1
|
| 42 |
+
|
| 43 |
+
def update_ngram_context(self, new_tokens: torch.Tensor) -> None:
|
| 44 |
+
"""
|
| 45 |
+
Update N-gram context with window management.
|
| 46 |
+
|
| 47 |
+
Args:
|
| 48 |
+
new_tokens: New tokens to append, shape (batch_size, seq_len)
|
| 49 |
+
"""
|
| 50 |
+
if self.ngram_context is None:
|
| 51 |
+
self.ngram_context = new_tokens.clone()
|
| 52 |
+
else:
|
| 53 |
+
self.ngram_context = torch.cat([self.ngram_context, new_tokens], dim=-1)
|
| 54 |
+
|
| 55 |
+
# Truncate to maintain constant memory footprint
|
| 56 |
+
if self.ngram_context.size(-1) > self.max_context_len:
|
| 57 |
+
self.ngram_context = self.ngram_context[..., -self.max_context_len:]
|
| 58 |
+
|
| 59 |
+
def reorder_cache(self, beam_idx: torch.LongTensor) -> "Cache":
|
| 60 |
+
"""Reorder cache for beam search."""
|
| 61 |
+
# Reorder parent's KV cache
|
| 62 |
+
super().reorder_cache(beam_idx)
|
| 63 |
+
|
| 64 |
+
# Reorder N-gram context
|
| 65 |
+
if self.ngram_context is not None:
|
| 66 |
+
self.ngram_context = self.ngram_context.index_select(0, beam_idx.to(self.ngram_context.device))
|
| 67 |
+
|
| 68 |
+
return self
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class NgramEmbedding(nn.Module):
|
| 72 |
+
"""
|
| 73 |
+
Computes embeddings enriched with N-gram features without maintaining internal state.
|
| 74 |
+
"""
|
| 75 |
+
def __init__(self, config, base_embeddings):
|
| 76 |
+
super().__init__()
|
| 77 |
+
self.config = config
|
| 78 |
+
self.word_embeddings = base_embeddings
|
| 79 |
+
|
| 80 |
+
self.m = config.ngram_vocab_size_ratio * config.vocab_size
|
| 81 |
+
self.k = config.emb_split_num
|
| 82 |
+
self.n = config.emb_neighbor_num
|
| 83 |
+
|
| 84 |
+
self._init_ngram_embeddings()
|
| 85 |
+
self._vocab_mods_cache = None
|
| 86 |
+
|
| 87 |
+
def _init_ngram_embeddings(self) -> None:
|
| 88 |
+
"""Initialize N-gram embedding and projection layers."""
|
| 89 |
+
num_embedders = self.k * (self.n - 1)
|
| 90 |
+
emb_dim = self.config.hidden_size // num_embedders
|
| 91 |
+
|
| 92 |
+
embedders = []
|
| 93 |
+
post_projs = []
|
| 94 |
+
|
| 95 |
+
for i in range(num_embedders):
|
| 96 |
+
vocab_size = int(self.m + i * 2 + 1)
|
| 97 |
+
emb = nn.Embedding(vocab_size, emb_dim, padding_idx=self.config.pad_token_id)
|
| 98 |
+
proj = nn.Linear(emb_dim, self.config.hidden_size, bias=False)
|
| 99 |
+
embedders.append(emb)
|
| 100 |
+
post_projs.append(proj)
|
| 101 |
+
|
| 102 |
+
self.embedders = nn.ModuleList(embedders)
|
| 103 |
+
self.post_projs = nn.ModuleList(post_projs)
|
| 104 |
+
|
| 105 |
+
def _shift_right_ignore_eos(self, tensor: torch.Tensor, n: int, eos_token_id: int = 2) -> torch.Tensor:
|
| 106 |
+
"""Shift tensor right by n positions, resetting at EOS tokens."""
|
| 107 |
+
batch_size, seq_len = tensor.shape
|
| 108 |
+
result = torch.zeros_like(tensor)
|
| 109 |
+
eos_mask = (tensor == eos_token_id)
|
| 110 |
+
|
| 111 |
+
for i in range(batch_size):
|
| 112 |
+
eos_positions = eos_mask[i].nonzero(as_tuple=True)[0]
|
| 113 |
+
prev_idx = 0
|
| 114 |
+
|
| 115 |
+
for eos_idx in eos_positions:
|
| 116 |
+
end_idx = eos_idx.item() + 1
|
| 117 |
+
if end_idx - prev_idx > n:
|
| 118 |
+
result[i, prev_idx+n:end_idx] = tensor[i, prev_idx:end_idx-n]
|
| 119 |
+
prev_idx = end_idx
|
| 120 |
+
|
| 121 |
+
if prev_idx < seq_len and seq_len - prev_idx > n:
|
| 122 |
+
result[i, prev_idx+n:seq_len] = tensor[i, prev_idx:seq_len-n]
|
| 123 |
+
|
| 124 |
+
return result
|
| 125 |
+
|
| 126 |
+
def _precompute_vocab_mods(self) -> Dict[Tuple[int, int], List[int]]:
|
| 127 |
+
"""Precompute modular arithmetic values for vocabulary."""
|
| 128 |
+
if self._vocab_mods_cache is not None:
|
| 129 |
+
return self._vocab_mods_cache
|
| 130 |
+
|
| 131 |
+
vocab_mods = {}
|
| 132 |
+
vocab_size = self.config.vocab_size
|
| 133 |
+
|
| 134 |
+
for i in range(2, self.n + 1):
|
| 135 |
+
for j in range(self.k):
|
| 136 |
+
index = (i - 2) * self.k + j
|
| 137 |
+
emb_vocab_dim = int(self.m + index * 2 + 1)
|
| 138 |
+
|
| 139 |
+
mods = []
|
| 140 |
+
power_mod = 1
|
| 141 |
+
for _ in range(i - 1):
|
| 142 |
+
power_mod = (power_mod * vocab_size) % emb_vocab_dim
|
| 143 |
+
mods.append(power_mod)
|
| 144 |
+
|
| 145 |
+
vocab_mods[(i, j)] = mods
|
| 146 |
+
|
| 147 |
+
self._vocab_mods_cache = vocab_mods
|
| 148 |
+
return vocab_mods
|
| 149 |
+
|
| 150 |
+
def _get_ngram_ids(
|
| 151 |
+
self,
|
| 152 |
+
input_ids: torch.Tensor,
|
| 153 |
+
shifted_ids: Dict[int, torch.Tensor],
|
| 154 |
+
vocab_mods: List[int],
|
| 155 |
+
ngram: int
|
| 156 |
+
) -> torch.Tensor:
|
| 157 |
+
"""Compute N-gram hash IDs using polynomial rolling hash."""
|
| 158 |
+
ngram_ids = input_ids.clone()
|
| 159 |
+
for k in range(2, ngram + 1):
|
| 160 |
+
ngram_ids = ngram_ids + shifted_ids[k] * vocab_mods[k - 2]
|
| 161 |
+
return ngram_ids
|
| 162 |
+
|
| 163 |
+
def forward(
|
| 164 |
+
self,
|
| 165 |
+
input_ids: torch.Tensor,
|
| 166 |
+
ngram_context: Optional[torch.Tensor] = None
|
| 167 |
+
) -> torch.Tensor:
|
| 168 |
+
"""
|
| 169 |
+
Stateless forward pass.
|
| 170 |
+
|
| 171 |
+
Args:
|
| 172 |
+
input_ids: Current input token IDs of shape (batch_size, seq_len)
|
| 173 |
+
ngram_context: Optional historical context of shape (batch_size, context_len)
|
| 174 |
+
|
| 175 |
+
Returns:
|
| 176 |
+
Embedding tensor of shape (batch_size, seq_len, hidden_size)
|
| 177 |
+
"""
|
| 178 |
+
seq_len = input_ids.size(-1)
|
| 179 |
+
|
| 180 |
+
# Determine complete context
|
| 181 |
+
if ngram_context is not None:
|
| 182 |
+
context = torch.cat([ngram_context[..., -(self.n-1):], input_ids], dim=-1)
|
| 183 |
+
else:
|
| 184 |
+
context = input_ids
|
| 185 |
+
|
| 186 |
+
# Base word embeddings
|
| 187 |
+
device = self.word_embeddings.weight.device
|
| 188 |
+
x = self.word_embeddings(input_ids.to(device)).clone()
|
| 189 |
+
|
| 190 |
+
# Precompute modular values
|
| 191 |
+
vocab_mods = self._precompute_vocab_mods()
|
| 192 |
+
|
| 193 |
+
# Compute shifted IDs
|
| 194 |
+
shifted_ids = {}
|
| 195 |
+
for i in range(2, self.n + 1):
|
| 196 |
+
shifted_ids[i] = self._shift_right_ignore_eos(
|
| 197 |
+
context, i - 1, eos_token_id=self.config.eos_token_id
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
# Add N-gram embeddings
|
| 201 |
+
for i in range(2, self.n + 1):
|
| 202 |
+
for j in range(self.k):
|
| 203 |
+
index = (i - 2) * self.k + j
|
| 204 |
+
emb_vocab_dim = int(self.m + index * 2 + 1)
|
| 205 |
+
|
| 206 |
+
ngram_ids = self._get_ngram_ids(context, shifted_ids, vocab_mods[(i, j)], ngram=i)
|
| 207 |
+
new_ids = (ngram_ids % emb_vocab_dim)[..., -seq_len:]
|
| 208 |
+
|
| 209 |
+
            embedder_device = self.embedders[index].weight.device
            x_ngram = self.embedders[index](new_ids.to(embedder_device))

            proj_device = self.post_projs[index].weight.device
            x_proj = self.post_projs[index](x_ngram.to(proj_device))
            x = x + x_proj.to(x.device)

        # Normalize: the sum combines one base embedding with (n - 1) k-weighted n-gram terms
        x = x / (1 + self.k * (self.n - 1))

        return x


class LongcatFlashNgramModel(LongcatFlashModel):
    """LongcatFlash model with N-gram enhanced embeddings."""

    _keys_to_ignore_on_load_unexpected = [r"model\.mtp.*"]
    config_class = LongcatFlashNgramConfig

    def __init__(self, config):
        super().__init__(config)

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.ngram_embeddings = NgramEmbedding(config, self.embed_tokens)

        self.layers = nn.ModuleList(
            [LongcatFlashDecoderLayer(config, layer_idx) for layer_idx in range(config.num_layers)]
        )

        self.head_dim = config.head_dim
        # Each decoder layer holds two attention sub-blocks, so expose 2x layers
        # to downstream consumers (e.g. KV-cache allocation).
        self.config.num_hidden_layers = 2 * config.num_layers
        self.norm = LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = LongcatFlashRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        **kwargs,
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        # Extract N-gram context if available
        ngram_context = None
        if isinstance(past_key_values, NgramCache) and past_key_values.ngram_context is not None:
            ngram_context = past_key_values.ngram_context

        if inputs_embeds is None:
            inputs_embeds = self.ngram_embeddings(input_ids, ngram_context=ngram_context)

        # Initialize NgramCache if needed
        if use_cache and past_key_values is None:
            past_key_values = NgramCache(config=self.config)

        # Update N-gram context
        if use_cache and isinstance(past_key_values, NgramCache):
            past_key_values.update_ngram_context(input_ids)

        # Prepare cache position
        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                inputs_embeds.shape[1], device=inputs_embeds.device
            ) + past_seen_tokens

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # Create causal mask
        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        # Forward through decoder layers
        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=None,
            attentions=None,
        )


class LongcatFlashNgramForCausalLM(LongcatFlashForCausalLM):
    """LongcatFlash model for causal language modeling with N-gram embeddings."""

    _keys_to_ignore_on_load_unexpected = [r"model\.mtp.*"]
    config_class = LongcatFlashNgramConfig

    def __init__(self, config):
        super().__init__(config)
        self.model = LongcatFlashNgramModel(config)

    @torch.no_grad()
    def generate(self, inputs=None, generation_config=None, **kwargs):
        """Override to ensure NgramCache is used."""
        if "past_key_values" not in kwargs or kwargs["past_key_values"] is None:
            kwargs["past_key_values"] = NgramCache(config=self.config)

        return super().generate(inputs=inputs, generation_config=generation_config, **kwargs)


__all__ = ["LongcatFlashNgramPreTrainedModel", "LongcatFlashNgramModel", "LongcatFlashNgramForCausalLM"]
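For a quick smoke test of the classes above, the hedged sketch below loads the checkpoint through the auto classes and generates a few tokens. The local path and prompt are placeholders; it assumes config.json maps the auto classes to the custom code in this repo (hence trust_remote_code=True), and it relies on the generate() override injecting an NgramCache when none is supplied.

from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./LongCat-Flash-Ngram"  # placeholder: local checkout of this repository

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    torch_dtype="auto",
    device_map="auto",
)

inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)
# No past_key_values passed, so the generate() override creates an NgramCache,
# which feeds ngram_context into NgramEmbedding on every forward pass.
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))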
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<longcat_s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</longcat_s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<longcat_pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<longcat_unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
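A minimal sketch of how the special-token map above surfaces on a loaded tokenizer, assuming a local checkout of this repository (the path is a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./LongCat-Flash-Ngram", trust_remote_code=True)  # placeholder path
assert tok.bos_token == "<longcat_s>"
assert tok.eos_token == "</longcat_s>"
assert tok.pad_token == "<longcat_pad>"
assert tok.unk_token == "<longcat_unk>"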
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,42 @@
{
  "add_bos_token": false,
  "add_eos_token": true,
  "add_prefix_space": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<longcat_s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "clean_up_tokenization_spaces": false,
  "eos_token": {
    "__type": "AddedToken",
    "content": "</longcat_s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "model_max_length": 131072,
  "pad_token": {
    "__type": "AddedToken",
    "content": "<longcat_pad>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sp_model_kwargs": {},
  "tokenizer_class": "BloomTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<longcat_unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "chat_template": "{%- set tool_choice = tool_choice | default('auto') %}\n{%- set ns = namespace(tool_types = [], last_query_index = -1) %}\n\n{%- if tools and tool_choice != 'none' %}\n {{- \"<longcat_tool_declare>\\n\"-}}\n {{- \"# Tools\\n\" }}\n {{- \"You have access to the following tools:\\n\\n\" }}\n {%- for tool in tools %}\n {%- if tool.type not in ns.tool_types %}\n {%- set ns.tool_types = ns.tool_types + [tool.type] %}\n {{- \"## Tool namespace: \" ~ tool.type ~ \"\\n\\n\" }}\n {%- endif %}\n {%- if tool.type == 'code_interpreter' %}\n {%- set tool = {\"type\":\"code_interpreter\",\"function\":{\"name\":\"code_interpreter_preview\",\"description\":\"The code will be executed in a stateful Jupyter notebook sandbox environment, only supports local computation, data processing, and file operations.\\nCode sandbox environment (network isolated) Any external network requests or online API calls are prohibited.\\nIf online functionality is needed, please use other permitted tools.\\nCode will respond with the output of the execution or time out after 60.0 seconds. \",\"parameters\":{\"type\":\"object\",\"properties\":{\"language\":{\"type\":\"string\",\"description\":\"The programming language of the code to be executed. Available values: python (Default), java, go, js, ts, c, c++.\"},\"code\":{\"type\":\"string\",\"description\":\"Python code to be executed must not include the following:\\n- Importing network libraries such as requests, httplib, etc.\\n- Any form of HTTP requests.\\n- External API calls.\\n- Network port operations. Example: ```python\\nimport pandas as pd\\npd.DataFrame({'A':[1,2]})\\n```\"},\"timeout\":{\"type\":\"number\",\"description\":\"The maximum execution time of the code, in seconds. Default is 60.0.\"}}},\"required\":[\"code\"]}} %}\n {%- endif %}\n {{- \"### Tool name: \" + tool.function.name + \"\\n\" }}\n {{- \"Description: \" + tool.function.description + \"\\n\\n\" }}\n {{- \"InputSchema: \" + tool.function.parameters | tojson(ensure_ascii=False) + \"\\n\\n\" }}\n {%- endfor %}\n {{- '**Note**: For each function call, output the function name and arguments within the following XML format:\\n<longcat_tool_call>{function-name}\\n<longcat_arg_key>{arg-key-1}</longcat_arg_key>\\n<longcat_arg_value>{arg-value-1}</longcat_arg_value>\\n<longcat_arg_key>{arg-key-2}</longcat_arg_key>\\n<longcat_arg_value>{arg-value-2}</longcat_arg_value>\\n...\\n</longcat_tool_call>\\n' }}\n {{- \"</longcat_tool_declare>\"-}}\n {%- for idx in range(messages|length - 1) %}\n {%- set msg = messages[idx] %}\n {%- if msg.role == 'assistant' and not msg.tool_calls %}\n {%- set ns.last_query_index = idx %}\n {%- endif %}\n {%- endfor%}\n{%- endif %}\n\n{%- for msg in messages %}\n {%- if msg.role == \"system\" %}\n {{- \"<longcat_system>\" + msg.content }}\n {%- elif msg.role == \"user\" %}\n {{- \"<longcat_user>\" }}\n {%- if msg[\"files\"] %}\n {{- '<longcat_files>\\n' ~ msg.files | tojson(indent=2) ~ '\\n</longcat_files>' }}\n {%- endif %}\n {{- msg.content }}\n {%- elif msg.role == \"assistant\" %}\n {{- \"<longcat_assistant>\" }}\n {%- if enable_thinking == true and msg.reasoning_content and ns.tool_types != [] and loop.index0 > ns.last_query_index %}\n {{- \"\\n<longcat_think>\\n\" ~ msg.reasoning_content ~ \"\\n</longcat_think>\\n\" }}\n {%- endif %}\n {%- if msg.content%}\n {{- msg.content }}\n {%- endif %}\n {%- if msg.tool_calls %}\n {%- for tool_call in msg.tool_calls -%}\n {{- \"<longcat_tool_call>\" ~ tool_call.function.name ~ \"\\n\" -}}\n {% set _args = tool_call.function.arguments %}\n {% for k, v in _args.items() %}\n {{- \"<longcat_arg_key>\" ~ k ~ \"</longcat_arg_key>\\n\" -}}\n {{- \"<longcat_arg_value>\" ~ (v if v is string else v | tojson(ensure_ascii=False)) ~ \"</longcat_arg_value>\\n\" -}}\n {% endfor %}\n {{- \"</longcat_tool_call>\\n\" }}\n {%- endfor %}\n {%- endif %}\n {{- \"</longcat_s>\" -}}\n {%- elif msg.role == \"tool\" %}\n {%- if messages[loop.index0 - 1].role != \"tool\"%}\n {{- \"<longcat_user>\" -}}\n {%- endif %}\n {{- \"<longcat_tool_response>\" ~ msg.content ~ \"</longcat_tool_response>\"-}}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {%- if enable_thinking == true %}\n {{- \" /think_on\" }}\n {%- if thinking_budget %}\n {%- if thinking_budget < 1024 %}\n {%- set thinking_budget = 1024 %}\n {%- endif%}\n {{- \"\\nthinking_budget: < \" ~ thinking_budget ~ \".\"}}\n {%- endif %}\n {{- \" <longcat_assistant><longcat_think>\\n\"}}\n {%- elif enable_thinking == false %}\n {{- \" /think_off <longcat_assistant><longcat_think>\\n\\n</longcat_think>\\n\" }}\n {%- else %}\n {{- \"<longcat_assistant>\" }}\n {%- endif %}\n{%- endif %}"
}
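Besides messages and tools, the chat_template above reads two custom variables: enable_thinking (switches between the /think_on and /think_off prompt forms) and thinking_budget (values below 1024 are raised to 1024). A hedged rendering sketch, with a placeholder path; extra keyword arguments to apply_chat_template are forwarded into the Jinja context:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./LongCat-Flash-Ngram", trust_remote_code=True)  # placeholder path

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

prompt = tok.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=True,  # emits " /think_on ... <longcat_assistant><longcat_think>\n"
    thinking_budget=2048,  # values below 1024 are clamped up to 1024 by the template
)
print(prompt)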