mirror of https://github.com/JasonYANG170/CodeGeeX4.git
synced 2024-11-23 12:16:33 +00:00
Add candle_demo
parent 8be6c39820
commit 231f76610d
candle_demo/Cargo.lock (generated executable file, 2682 lines)
File diff suppressed because it is too large
candle_demo/Cargo.toml (new executable file, 28 lines)
@@ -0,0 +1,28 @@
[package]
name = "codegeex4-candle"
version = "0.1.0"
edition = "2021"
authors = ["Donjuan Platinum <donjuan@lecturify.net>"]
license = "GPL-2.0-only"
description = "Codegeex4"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
# candle-transformers = {path = "../candle/candle-transformers"}
# candle-core = {path = "../candle/candle-core"}
# candle-nn = {path = "../candle/candle-nn"}
anyhow = "1.0.86"
hf-hub = "0.3.2"
#tokenizer = "0.1.2"
clap = { version = "4.5.6", features = ["derive"] }
#tracing-chrome = "0.7.2"
#candle-examples = {path = "../candle/candle-examples"}
#tracing-subscriber = "0.3.18"
tokenizers = "0.19.1"
serde_json = "1.0.120"
candle-core = "0.6.0"
candle-transformers = "0.6.0"
candle-examples = "0.6.0"
candle-nn = "0.6.0"
safetensors = "0.4.3"
#safetensors = {path ="../safetensors/safetensors"}
candle_demo/src/codegeex4.rs (new executable file, 592 lines)
@@ -0,0 +1,592 @@
use candle_transformers::models::with_tracing::{linear_b as linear, Linear};
use candle_core::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_core as candle;
use candle_nn::VarBuilder;

#[derive(Debug, Clone)]
pub struct Config {
    pub num_layers: usize,
    pub padded_vocab_size: usize,
    pub hidden_size: usize,
    pub ffn_hidden_size: usize,
    pub kv_channels: usize,
    pub num_attention_heads: usize,
    pub seq_length: usize,
    pub layernorm_epsilon: f64,
    pub rmsnorm: bool,
    pub apply_residual_connection_post_layernorm: bool,
    pub post_layer_norm: bool,
    pub add_bias_linear: bool,
    pub add_qkv_bias: bool,
    pub bias_dropout_fusion: bool,
    pub multi_query_attention: bool,
    pub multi_query_group_num: usize,
    pub apply_query_key_layer_scaling: bool,
    pub attention_softmax_in_fp32: bool,
    pub fp32_residual_connection: bool,
}

impl Config {
    pub fn codegeex4() -> Self {
        Self {
            num_layers: 40,
            padded_vocab_size: 151552,
            hidden_size: 4096,
            ffn_hidden_size: 13696,
            kv_channels: 128,
            num_attention_heads: 32,
            seq_length: 131072,
            layernorm_epsilon: 1e-5,
            rmsnorm: true,
            apply_residual_connection_post_layernorm: false,
            post_layer_norm: true,
            add_bias_linear: false,
            add_qkv_bias: true,
            bias_dropout_fusion: true,
            multi_query_attention: true,
            multi_query_group_num: 2,
            apply_query_key_layer_scaling: true,
            attention_softmax_in_fp32: true,
            fp32_residual_connection: false,
        }
    }
}

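For orientation, these defaults determine the head layout used by SelfAttention::new further down. A minimal sketch (not part of the commit; crate and module names are those of this demo) of the derived sizes:

use codegeex4_candle::codegeex4::Config;

fn main() {
    let cfg = Config::codegeex4();
    // 32 query heads of 128 channels each: 128 * 32 = 4096 = hidden_size
    let projection_size = cfg.kv_channels * cfg.num_attention_heads;
    // multi-query attention: the 32 query heads share only 2 key/value groups,
    // so the fused QKV projection is 4096 + 2 * 128 * 2 = 4608 columns wide
    let qkv_hidden_size = projection_size + 2 * cfg.kv_channels * cfg.multi_query_group_num;
    assert_eq!((projection_size, qkv_hidden_size), (4096, 4608));
}
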
#[derive(Debug, Clone)]
struct RotaryEmbedding {
    cache: Tensor,
}

impl RotaryEmbedding {
    fn new(cfg: &Config, dtype: DType, dev: &Device) -> Result<Self> {
        let rotary_dim = cfg.kv_channels;
        let n_elem = rotary_dim / 2;
        let inv_freq: Vec<_> = (0..n_elem)
            .step_by(2)
            .map(|i| 1f32 / 10_000f64.powf(i as f64 / n_elem as f64) as f32)
            .collect();
        let inv_freq_len = inv_freq.len();
        let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
        let t = Tensor::arange(0u32, cfg.seq_length as u32, dev)?
            .to_dtype(dtype)?
            .reshape((cfg.seq_length, 1))?;
        let freqs = t.matmul(&inv_freq)?;
        let cache = Tensor::stack(&[&freqs.cos()?, &freqs.sin()?], D::Minus1)?;
        Ok(Self { cache })
    }

    fn apply(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
        let (seqlen, _b, np, _hn) = xs.dims4()?;
        let cache = self.cache.narrow(0, seqlen_offset, seqlen)?;
        let rot_dim = cache.dim(D::Minus2)? * 2;
        let (xs, xs_pass) = (
            xs.narrow(D::Minus1, 0, rot_dim)?,
            xs.narrow(D::Minus1, rot_dim, rot_dim)?,
        );
        let xshaped = xs.reshape((seqlen, (), np, rot_dim / 2, 2))?;
        let cache = cache.reshape((seqlen, (), 1, rot_dim / 2, 2))?;
        let (xshaped0, xshaped1) = (
            xshaped.i((.., .., .., .., 0))?,
            xshaped.i((.., .., .., .., 1))?,
        );
        let (cache0, cache1) = (cache.i((.., .., .., .., 0))?, cache.i((.., .., .., .., 1))?);
        let xs_out = Tensor::stack(
            &[
                (xshaped0.broadcast_mul(&cache0)? - xshaped1.broadcast_mul(&cache1)?)?,
                (xshaped1.broadcast_mul(&cache0)? + xshaped0.broadcast_mul(&cache1)?)?,
            ],
            D::Minus1,
        )?;
        let xs_out = xs_out.flatten_from(3)?;
        Tensor::cat(&[xs_out, xs_pass], D::Minus1)
    }
}

#[derive(Debug, Clone)]
struct CoreAttention {
    coeff: Option<f64>,
    norm_factor: f64,
}

fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let shape = mask.shape();
    let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
    let m = mask.where_cond(&on_true, on_false)?;
    Ok(m)
}

impl CoreAttention {
    fn new(layer_number: usize, cfg: &Config) -> Result<Self> {
        let norm_factor = (cfg.kv_channels as f64).sqrt();
        let (norm_factor, coeff) = if cfg.apply_query_key_layer_scaling {
            let coeff = f64::max(1.0, layer_number as f64);
            (norm_factor * coeff, Some(coeff))
        } else {
            (norm_factor, None)
        };
        Ok(Self { coeff, norm_factor })
    }

    fn forward(
        &self,
        query_layer: &Tensor,
        key_layer: &Tensor,
        value_layer: &Tensor,
        attention_mask: &Option<Tensor>,
    ) -> Result<Tensor> {
        let output_size = (
            query_layer.dim(1)?, // b
            query_layer.dim(2)?, // np
            query_layer.dim(0)?, // sq
            key_layer.dim(0)?,   // sk
        );
        let query_layer =
            query_layer.reshape((output_size.2, output_size.0 * output_size.1, ()))?;
        let key_layer = key_layer.reshape((output_size.3, output_size.0 * output_size.1, ()))?;
        let matmul_result = Tensor::matmul(
            &query_layer.transpose(0, 1)?,
            &key_layer.transpose(0, 1)?.transpose(1, 2)?,
        )?;
        let matmul_result = (matmul_result / self.norm_factor)?.reshape(output_size)?;
        let matmul_result = match self.coeff {
            None => matmul_result,
            Some(coeff) => (matmul_result * coeff)?,
        };
        let attention_scores = match attention_mask {
            Some(mask) => masked_fill(
                &matmul_result,
                &mask.broadcast_left((matmul_result.dim(0)?, matmul_result.dim(1)?))?,
                f32::NEG_INFINITY,
            )?,
            None => matmul_result,
        };
        let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;

        let output_size = (
            value_layer.dim(1)?,
            value_layer.dim(2)?,
            query_layer.dim(0)?,
            value_layer.dim(3)?,
        );
        let value_layer =
            value_layer.reshape((value_layer.dim(0)?, output_size.0 * output_size.1, ()))?;
        let attention_probs =
            attention_probs.reshape((output_size.0 * output_size.1, output_size.2, ()))?;
        let context_layer = Tensor::matmul(&attention_probs, &value_layer.transpose(0, 1)?)?;
        let context_layer = context_layer.reshape(output_size)?;
        let context_layer = context_layer.permute((2, 0, 1, 3))?.contiguous()?;
        context_layer.flatten_from(D::Minus2)
    }
}

#[derive(Debug, Clone)]
struct SelfAttention {
    query_key_value: Linear,
    core_attention: CoreAttention,
    dense: Linear,
    multi_query_attention: bool,
    num_attention_heads_per_partition: usize,
    num_multi_query_groups_per_partition: usize,
    hidden_size_per_attention_head: usize,
    kv_cache: Option<(Tensor, Tensor)>,
}

impl SelfAttention {
    fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let projection_size = cfg.kv_channels * cfg.num_attention_heads;
        let hidden_size_per_attention_head = projection_size / cfg.num_attention_heads;
        let qkv_hidden_size = if cfg.multi_query_attention {
            projection_size + 2 * hidden_size_per_attention_head * cfg.multi_query_group_num
        } else {
            3 * projection_size
        };
        let query_key_value = linear(
            cfg.hidden_size,
            qkv_hidden_size,
            cfg.add_bias_linear || cfg.add_qkv_bias,
            vb.pp("query_key_value"),
        )?;
        let core_attention = CoreAttention::new(layer_number, cfg)?;
        let dense = linear(
            cfg.hidden_size,
            cfg.hidden_size,
            cfg.add_bias_linear,
            vb.pp("dense"),
        )?;
        Ok(Self {
            query_key_value,
            core_attention,
            dense,
            multi_query_attention: cfg.multi_query_attention,
            num_attention_heads_per_partition: cfg.num_attention_heads,
            num_multi_query_groups_per_partition: cfg.multi_query_group_num,
            hidden_size_per_attention_head: cfg.kv_channels,
            kv_cache: None,
        })
    }

    fn reset_kv_cache(&mut self) {
        self.kv_cache = None
    }

    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: &Option<Tensor>,
        rotary_emb: &RotaryEmbedding,
    ) -> Result<Tensor> {
        let mixed_x_layer = xs.apply(&self.query_key_value)?;
        if !self.multi_query_attention {
            candle::bail!("only multi_query_attention=true is supported")
        }
        let hpa = self.hidden_size_per_attention_head;
        let query_layer =
            mixed_x_layer.narrow(D::Minus1, 0, self.num_attention_heads_per_partition * hpa)?;
        let key_layer = mixed_x_layer.narrow(
            D::Minus1,
            self.num_attention_heads_per_partition * hpa,
            self.num_multi_query_groups_per_partition * hpa,
        )?;
        let value_layer = mixed_x_layer.narrow(
            D::Minus1,
            self.num_attention_heads_per_partition * hpa
                + self.num_multi_query_groups_per_partition * hpa,
            self.num_multi_query_groups_per_partition * hpa,
        )?;
        let query_layer = query_layer.reshape((
            query_layer.dim(0)?,
            query_layer.dim(1)?,
            self.num_attention_heads_per_partition,
            hpa,
        ))?;
        let key_layer = key_layer.reshape((
            key_layer.dim(0)?,
            key_layer.dim(1)?,
            self.num_multi_query_groups_per_partition,
            hpa,
        ))?;
        let value_layer = value_layer.reshape((
            value_layer.dim(0)?,
            value_layer.dim(1)?,
            self.num_multi_query_groups_per_partition,
            hpa,
        ))?;

        // Rotary embeddings.
        let seqlen_offset = match &self.kv_cache {
            None => 0,
            Some((prev_k, _)) => prev_k.dim(0)?,
        };
        let query_layer = rotary_emb.apply(&query_layer, seqlen_offset)?;
        let key_layer = rotary_emb.apply(&key_layer, seqlen_offset)?;

        // KV cache.
        let (key_layer, value_layer) = match &self.kv_cache {
            None => (key_layer, value_layer),
            Some((prev_k, prev_v)) => {
                let k = Tensor::cat(&[prev_k, &key_layer], 0)?;
                let v = Tensor::cat(&[prev_v, &value_layer], 0)?;
                (k, v)
            }
        };
        self.kv_cache = Some((key_layer.clone(), value_layer.clone()));

        // Repeat KV.
        let ratio =
            self.num_attention_heads_per_partition / self.num_multi_query_groups_per_partition;
        let key_layer = {
            let (d0, d1, d2, d3) = key_layer.dims4()?;
            key_layer
                .unsqueeze(D::Minus2)?
                .expand((d0, d1, d2, ratio, d3))?
                .reshape((
                    d0,
                    d1,
                    self.num_attention_heads_per_partition,
                    self.hidden_size_per_attention_head,
                ))?
        };
        let value_layer = {
            let (d0, d1, d2, d3) = value_layer.dims4()?;
            value_layer
                .unsqueeze(D::Minus2)?
                .expand((d0, d1, d2, ratio, d3))?
                .reshape((
                    d0,
                    d1,
                    self.num_attention_heads_per_partition,
                    self.hidden_size_per_attention_head,
                ))?
        };

        let context_layer =
            self.core_attention
                .forward(&query_layer, &key_layer, &value_layer, attention_mask)?;
        let output = context_layer.apply(&self.dense)?;
        Ok(output)
    }
}

#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone)]
struct MLP {
    dense_h_to_4h: Linear,
    dense_4h_to_h: Linear,
}

impl MLP {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let dense_h_to_4h = linear(
            cfg.hidden_size,
            cfg.ffn_hidden_size * 2,
            cfg.add_bias_linear,
            vb.pp("dense_h_to_4h"),
        )?;
        let dense_4h_to_h = linear(
            cfg.ffn_hidden_size,
            cfg.hidden_size,
            cfg.add_bias_linear,
            vb.pp("dense_4h_to_h"),
        )?;
        Ok(Self {
            dense_4h_to_h,
            dense_h_to_4h,
        })
    }
}

impl Module for MLP {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        xs.apply(&self.dense_h_to_4h)?
            .apply(&candle_nn::Activation::Swiglu)?
            .apply(&self.dense_4h_to_h)
    }
}

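An aside, not part of the commit: candle's Activation::Swiglu, as I understand it, splits the last dimension in half and multiplies silu of the first half by the second half, which is why dense_h_to_4h projects to ffn_hidden_size * 2 while dense_4h_to_h maps ffn_hidden_size back to hidden_size. A small sketch of the widths this implies for Config::codegeex4():

use codegeex4_candle::codegeex4::Config;

fn main() {
    let cfg = Config::codegeex4();
    let up = cfg.ffn_hidden_size * 2; // dense_h_to_4h output width: 27392
    let gated = up / 2; // Swiglu halves it again: 13696
    assert_eq!((up, gated, cfg.hidden_size), (27392, 13696, 4096));
}
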
#[derive(Debug, Clone)]
struct Block {
    input_layernorm: candle_nn::LayerNorm,
    self_attention: SelfAttention,
    post_attention_layernorm: candle_nn::LayerNorm,
    mlp: MLP,
    apply_residual_connection_post_layernorm: bool,
}

impl Block {
    fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let input_layernorm = if cfg.rmsnorm {
            candle_nn::rms_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("input_layernorm"),
            )?
            .into_inner()
        } else {
            candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("input_layernorm"),
            )?
        };
        let post_attention_layernorm = if cfg.rmsnorm {
            candle_nn::rms_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("post_attention_layernorm"),
            )?
            .into_inner()
        } else {
            candle_nn::layer_norm(
                cfg.hidden_size,
                cfg.layernorm_epsilon,
                vb.pp("post_attention_layernorm"),
            )?
        };
        let self_attention = SelfAttention::new(layer_number, cfg, vb.pp("self_attention"))?;
        let mlp = MLP::new(cfg, vb.pp("mlp"))?;
        Ok(Self {
            input_layernorm,
            self_attention,
            post_attention_layernorm,
            mlp,
            apply_residual_connection_post_layernorm: cfg.apply_residual_connection_post_layernorm,
        })
    }

    fn reset_kv_cache(&mut self) {
        self.self_attention.reset_kv_cache()
    }

    fn forward(
        &mut self,
        xs: &Tensor,
        attention_mask: &Option<Tensor>,
        rotary_emb: &RotaryEmbedding,
    ) -> Result<Tensor> {
        let layernorm_output = xs.apply(&self.input_layernorm)?;
        let attention_output =
            self.self_attention
                .forward(&layernorm_output, attention_mask, rotary_emb)?;
        let residual = if self.apply_residual_connection_post_layernorm {
            &layernorm_output
        } else {
            xs
        };
        let layernorm_input = (residual + attention_output)?;
        let layernorm_output = layernorm_input.apply(&self.post_attention_layernorm)?;
        let mlp_output = layernorm_output.apply(&self.mlp)?;
        let residual = if self.apply_residual_connection_post_layernorm {
            &layernorm_output
        } else {
            &layernorm_input
        };
        mlp_output + residual
    }
}

#[derive(Debug, Clone)]
struct Transformer {
    layers: Vec<Block>,
    final_layernorm: Option<candle_nn::LayerNorm>,
    rotary_emb: RotaryEmbedding,
}

impl Transformer {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb_l = vb.pp("layers");
        let mut layers = Vec::with_capacity(cfg.num_layers);
        println!("creating transformer layers");
        let mut count = 0;
        for layer_index in 0..cfg.num_layers {
            count += 1;
            println!("creating layer {} of {}", count, cfg.num_layers);
            let block = Block::new(layer_index + 1, cfg, vb_l.pp(layer_index))?;
            layers.push(block)
        }
        let final_layernorm = if cfg.post_layer_norm {
            let ln = if cfg.rmsnorm {
                candle_nn::rms_norm(
                    cfg.hidden_size,
                    cfg.layernorm_epsilon,
                    vb.pp("final_layernorm"),
                )?
                .into_inner()
            } else {
                candle_nn::layer_norm(
                    cfg.hidden_size,
                    cfg.layernorm_epsilon,
                    vb.pp("final_layernorm"),
                )?
            };
            Some(ln)
        } else {
            None
        };
        let rotary_emb = RotaryEmbedding::new(cfg, vb.dtype(), vb.device())?;
        Ok(Self {
            layers,
            final_layernorm,
            rotary_emb,
        })
    }

    fn reset_kv_cache(&mut self) {
        for block in self.layers.iter_mut() {
            block.reset_kv_cache()
        }
    }

    fn forward(&mut self, xs: &Tensor, attention_mask: &Option<Tensor>) -> Result<Tensor> {
        let mut xs = xs.clone();
        for block in self.layers.iter_mut() {
            xs = block.forward(&xs, attention_mask, &self.rotary_emb)?
        }
        match self.final_layernorm.as_ref() {
            None => Ok(xs),
            Some(ln) => xs.apply(ln),
        }
    }
}

#[derive(Debug, Clone)]
struct Embedding {
    word_embeddings: candle_nn::Embedding,
    fp32_residual_connection: bool,
}

impl Embedding {
    fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let word_embeddings = candle_nn::embedding(
            cfg.padded_vocab_size,
            cfg.hidden_size,
            vb.pp("word_embeddings"),
        )?;
        Ok(Self {
            word_embeddings,
            fp32_residual_connection: cfg.fp32_residual_connection,
        })
    }
}

impl Module for Embedding {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let xs = self.word_embeddings.forward(xs)?.transpose(0, 1)?; // b,s,h -> s,b,h
        if self.fp32_residual_connection {
            xs.to_dtype(candle::DType::F32)
        } else {
            xs.contiguous()
        }
    }
}

#[derive(Debug, Clone)]
pub struct Model {
    embedding: Embedding,
    encoder: Transformer,
    output_layer: Linear,
}

fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
    let mask: Vec<_> = (0..size)
        .flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
        .collect();
    Tensor::from_slice(&mask, (size, size), device)
}

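As a quick illustration (not part of the commit): the causal mask built by get_mask marks future positions with a 1, and masked_fill above then drives those scores to -inf before the softmax. For a 3-token prompt:

use candle_core::{Device, Result};

fn main() -> Result<()> {
    // assumes the get_mask helper defined above is in scope
    let mask = get_mask(3, &Device::Cpu)?;
    assert_eq!(
        mask.to_vec2::<u8>()?,
        vec![vec![0, 1, 1], vec![0, 0, 1], vec![0, 0, 0]]
    );
    Ok(())
}
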
impl Model {
    pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let vb = vb.pp("transformer");
        let embedding = Embedding::new(cfg, vb.pp("embedding"))?;
        let encoder = Transformer::new(cfg, vb.pp("encoder"))?;
        let output_layer = linear(
            cfg.hidden_size,
            cfg.padded_vocab_size,
            false,
            vb.pp("output_layer"),
        )?;

        Ok(Self {
            embedding,
            encoder,
            output_layer,
        })
    }

    pub fn reset_kv_cache(&mut self) {
        self.encoder.reset_kv_cache()
    }

    pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
        let (_b_size, seq_len) = xs.dims2()?;
        let input_embeds = xs.apply(&self.embedding)?;
        let attention_mask = if seq_len <= 1 {
            None
        } else {
            Some(get_mask(seq_len, xs.device())?)
        };
        let xs = self.encoder.forward(&input_embeds, &attention_mask)?;
        let lm_logits = xs.i(seq_len - 1)?.apply(&self.output_layer)?;
        Ok(lm_logits)
    }
}
candle_demo/src/lib.rs (new executable file, 1 line)
@@ -0,0 +1 @@
pub mod codegeex4;
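Because lib.rs exposes the module as a library, the model can also be embedded outside the CLI that follows. A minimal sketch (not part of the commit; it assumes a locally downloaded safetensors file and uses placeholder token ids instead of a real tokenizer):

use anyhow::Result;
use candle_core::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use codegeex4_candle::codegeex4::{Config, Model};

fn main() -> Result<()> {
    let device = Device::Cpu;
    let config = Config::codegeex4();
    // hypothetical local weight file; the CLI below resolves the real shards via hf-hub
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)? };
    let mut model = Model::new(&config, vb)?;
    // placeholder token ids; real ids come from the chatglm tokenizer
    let input = Tensor::new(&[1u32, 2, 3], &device)?.unsqueeze(0)?;
    let logits = model.forward(&input)?; // logits for the last position: (1, padded_vocab_size)
    println!("{:?}", logits.dims());
    Ok(())
}
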
candle_demo/src/main.rs (new executable file, 230 lines)
@@ -0,0 +1,230 @@
use anyhow::{Error as E, Result};
use clap::Parser;
use codegeex4_candle::codegeex4::*;

use candle_core::{DType, Device, Tensor};
use candle_core as candle;
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;

struct TextGeneration {
    model: Model,
    device: Device,
    tokenizer: Tokenizer,
    logits_processor: LogitsProcessor,
    repeat_penalty: f32,
    repeat_last_n: usize,
    verbose_prompt: bool,
}

impl TextGeneration {
    #[allow(clippy::too_many_arguments)]
    fn new(
        model: Model,
        tokenizer: Tokenizer,
        seed: u64,
        temp: Option<f64>,
        top_p: Option<f64>,
        repeat_penalty: f32,
        repeat_last_n: usize,
        verbose_prompt: bool,
        device: &Device,
    ) -> Self {
        let logits_processor = LogitsProcessor::new(seed, temp, top_p);
        Self {
            model,
            tokenizer,
            logits_processor,
            repeat_penalty,
            repeat_last_n,
            verbose_prompt,
            device: device.clone(),
        }
    }

    fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> {
        use std::io::Write;
        println!("starting the inference loop");
        let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?;
        println!("prompt tokenized");
        if tokens.is_empty() {
            anyhow::bail!("Empty prompts are not supported in the chatglm model.")
        }
        if self.verbose_prompt {
            for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) {
                let token = token.replace('▁', " ").replace("<0x0A>", "\n");
                println!("{id:7} -> '{token}'");
            }
        }
        let mut tokens = tokens.get_ids().to_vec();
        let mut generated_tokens = 0usize;
        let eos_token = match self.tokenizer.get_vocab(true).get("</s>") {
            Some(token) => *token,
            None => anyhow::bail!("cannot find the endoftext token"),
        };
        print!("{prompt}");
        std::io::stdout().flush()?;
        let start_gen = std::time::Instant::now();
        println!("start_gen");
        println!("sample_len {}", sample_len);
        let mut count = 0;
        for index in 0..sample_len {
            count += 1;
            println!("sample count {}", count);
            let context_size = if index > 0 { 1 } else { tokens.len() };
            let ctxt = &tokens[tokens.len().saturating_sub(context_size)..];
            let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?;
            let logits = self.model.forward(&input)?;
            let logits = logits.squeeze(0)?.to_dtype(DType::F32)?;
            let logits = if self.repeat_penalty == 1. {
                logits
            } else {
                let start_at = tokens.len().saturating_sub(self.repeat_last_n);
                candle_transformers::utils::apply_repeat_penalty(
                    &logits,
                    self.repeat_penalty,
                    &tokens[start_at..],
                )?
            };

            let next_token = self.logits_processor.sample(&logits)?;
            tokens.push(next_token);
            generated_tokens += 1;
            if next_token == eos_token {
                break;
            }
            let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?;
            print!("{token}");
            std::io::stdout().flush()?;
        }
        let dt = start_gen.elapsed();
        println!(
            "\n{generated_tokens} tokens generated ({:.2} token/s)",
            generated_tokens as f64 / dt.as_secs_f64(),
        );
        Ok(())
    }
}

#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
    /// Cache directory for the Hugging Face hub.
    #[arg(name = "cache", short, long, default_value = ".")]
    cache_path: String,

    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,

    /// Display the token for the specified prompt.
    #[arg(long)]
    verbose_prompt: bool,

    #[arg(long)]
    prompt: String,

    /// The temperature used to generate samples.
    #[arg(long)]
    temperature: Option<f64>,

    /// Nucleus sampling probability cutoff.
    #[arg(long)]
    top_p: Option<f64>,

    /// The seed to use when generating random samples.
    #[arg(long, default_value_t = 299792458)]
    seed: u64,

    /// The length of the sample to generate (in tokens).
    #[arg(long, short = 'n', default_value_t = 5000)]
    sample_len: usize,

    /// Model id on the Hugging Face hub (defaults to THUDM/codegeex4-all-9b).
    #[arg(long)]
    model_id: Option<String>,

    /// Repository revision to fetch (defaults to main).
    #[arg(long)]
    revision: Option<String>,

    /// Load weights from a local safetensors file instead of the hub index.
    #[arg(long)]
    weight_file: Option<String>,

    /// Path to a local tokenizer file (defaults to fetching chatglm-tokenizer.json from the hub).
    #[arg(long)]
    tokenizer: Option<String>,

    /// Penalty to be applied for repeating tokens, 1. means no penalty.
    #[arg(long, default_value_t = 1.1)]
    repeat_penalty: f32,

    /// The context size to consider for the repeat penalty.
    #[arg(long, default_value_t = 64)]
    repeat_last_n: usize,
}

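Taken together, the flags above give a command line like the following (an illustrative invocation, not from the commit; the prompt is the only required argument, and the model id, tokenizer, and cache directory fall back to the defaults wired up in main below):

cargo run --release -- --prompt "write a quicksort function in rust" --sample-len 512 --cpu
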
fn main() -> Result<()> {
    let args = Args::parse();
    println!(
        "avx: {}, neon: {}, simd128: {}, f16c: {}",
        candle::utils::with_avx(),
        candle::utils::with_neon(),
        candle::utils::with_simd128(),
        candle::utils::with_f16c()
    );
    println!(
        "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
        args.temperature.unwrap_or(0.),
        args.repeat_penalty,
        args.repeat_last_n
    );

    let start = std::time::Instant::now();
    println!("cache path {}", args.cache_path);
    let api = hf_hub::api::sync::ApiBuilder::from_cache(hf_hub::Cache::new(args.cache_path.into()))
        .build()?;

    let model_id = match args.model_id {
        Some(model_id) => model_id.to_string(),
        None => "THUDM/codegeex4-all-9b".to_string(),
    };
    let revision = match args.revision {
        Some(rev) => rev.to_string(),
        None => "main".to_string(),
    };
    let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision));
    let tokenizer_filename = match args.tokenizer {
        Some(file) => std::path::PathBuf::from(file),
        None => api
            .model("donjuanplatinum1/tokenizer".to_string())
            .get("chatglm-tokenizer.json")?,
    };
    let filenames = match args.weight_file {
        Some(weight_file) => vec![std::path::PathBuf::from(weight_file)],
        None => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?,
    };
    println!("retrieved the files in {:?}", start.elapsed());
    let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;

    let start = std::time::Instant::now();
    let config = Config::codegeex4();
    let device = candle_examples::device(args.cpu)?;
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? };
    let model = Model::new(&config, vb)?;

    println!("loaded the model in {:?}", start.elapsed());

    let mut pipeline = TextGeneration::new(
        model,
        tokenizer,
        args.seed,
        args.temperature,
        args.top_p,
        args.repeat_penalty,
        args.repeat_last_n,
        args.verbose_prompt,
        &device,
    );
    pipeline.run(&args.prompt, args.sample_len)?;
    Ok(())
}