diff --git a/Season1.step_into_chatgpt/1.Transformer/Transformer.pdf b/Season1.step_into_chatgpt/1.Transformer/Transformer.pdf
deleted file mode 100644
index 3560fd4..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/Transformer.pdf and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/Multi30K.png b/Season1.step_into_chatgpt/1.Transformer/assets/Multi30K.png
deleted file mode 100644
index 6a3feb7..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/Multi30K.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/attention.jpg b/Season1.step_into_chatgpt/1.Transformer/assets/attention.jpg
deleted file mode 100644
index c9e6505..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/attention.jpg and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/attention_in_tasks.png b/Season1.step_into_chatgpt/1.Transformer/assets/attention_in_tasks.png
deleted file mode 100644
index b3bfd23..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/attention_in_tasks.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/attention_qkv.png b/Season1.step_into_chatgpt/1.Transformer/assets/attention_qkv.png
deleted file mode 100644
index 21e036e..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/attention_qkv.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/cover.png b/Season1.step_into_chatgpt/1.Transformer/assets/cover.png
deleted file mode 100644
index d689732..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/cover.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/decoder.png b/Season1.step_into_chatgpt/1.Transformer/assets/decoder.png
deleted file mode 100644
index 502c9ee..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/decoder.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/decoder_self_attn_mask.png b/Season1.step_into_chatgpt/1.Transformer/assets/decoder_self_attn_mask.png
deleted file mode 100644
index 5c85c21..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/decoder_self_attn_mask.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/dot_product.png b/Season1.step_into_chatgpt/1.Transformer/assets/dot_product.png
deleted file mode 100644
index 84e160e..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/dot_product.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/encoder.png b/Season1.step_into_chatgpt/1.Transformer/assets/encoder.png
deleted file mode 100644
index 90b885f..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/encoder.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/fp.png b/Season1.step_into_chatgpt/1.Transformer/assets/fp.png
deleted file mode 100644
index 48916a3..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/fp.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/multihead_attention.png b/Season1.step_into_chatgpt/1.Transformer/assets/multihead_attention.png
deleted file mode 100644
index ddaf19a..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/multihead_attention.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/self_attention.png b/Season1.step_into_chatgpt/1.Transformer/assets/self_attention.png
deleted file mode 100644
index 45907f1..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/self_attention.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/table_of_contents.png b/Season1.step_into_chatgpt/1.Transformer/assets/table_of_contents.png
deleted file mode 100644
index 343b45b..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/table_of_contents.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/transformer.gif b/Season1.step_into_chatgpt/1.Transformer/assets/transformer.gif
deleted file mode 100644
index f50a0c2..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/transformer.gif and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/transformer.png b/Season1.step_into_chatgpt/1.Transformer/assets/transformer.png
deleted file mode 100644
index 9228919..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/transformer.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/transformer_structure.png b/Season1.step_into_chatgpt/1.Transformer/assets/transformer_structure.png
deleted file mode 100644
index 19ffd13..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/transformer_structure.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/assets/value_and_grad.png b/Season1.step_into_chatgpt/1.Transformer/assets/value_and_grad.png
deleted file mode 100644
index 5f46dbf..0000000
Binary files a/Season1.step_into_chatgpt/1.Transformer/assets/value_and_grad.png and /dev/null differ
diff --git a/Season1.step_into_chatgpt/1.Transformer/transformer-new.ipynb b/Season1.step_into_chatgpt/1.Transformer/transformer-new.ipynb
deleted file mode 100644
index 36c49b3..0000000
--- a/Season1.step_into_chatgpt/1.Transformer/transformer-new.ipynb
+++ /dev/null
@@ -1,2464 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "

"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## 环境配置\n",
- "\n",
- " python =3.9\n",
- " mindspore = 2.3.1\n",
- " nltk\n",
- "\n",
- "**在线运行代码平台链接:**\n",
- "- 1. [华为云AI Gallery](https://pangu.huaweicloud.com/gallery/asset-detail.html?id=b112c35e-18f9-4a22-bb40-347df216632c&ticket=ST-889261-TfXo7OcLPgDqDTPK4CpSVfFe-sso)\n",
- "- 2. [大模型平台AI实验室统一入口](https://xihe.mindspore.cn/projects)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "# 前言\n",
- "\n",
- "Transformer是一种神经网络结构,由Vaswani等人在2017年的论文“Attention Is All You Need”中提出,用于处理机器翻译、语言建模和文本生成等自然语言处理任务。\n",
- "\n",
- "Transformer与传统NLP特征提取类模型的区别主要在以下两点:\n",
- "\n",
- " - Transformer是一个纯基于注意力机制的结构,并将自注意力机制和多头注意力机制的概念运用到模型中;\n",
- " - 由于缺少RNN模型的时序性,Transformer引入了位置编码,在数据上而非模型中添加位置信息;\n",
- "\n",
- "以上的处理带来了几个优点:\n",
- " - 更容易并行化,训练更加高效;\n",
- " - 在处理长序列的任务中表现优秀,可以快速捕捉长距离中的关联信息。"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "# 注意力机制 (Attention)\n",
- "\n",
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 注意力机制\n",
- "\n",
- "如同阅读时,视线只会集中在正在阅读的部分;自然语言处理中,根据任务内容的不同,句子中需要更加关注的部分也会不同。\n",
- "\n",
- "
\n",
- "\n",
- "注意力机制便是在判断**词在句子中的重要性**,我们通过**注意力分数**来表达某个词在句子中的重要性,分数越高,说明该词对完成该任务的重要性越大。"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "计算注意力分数时,我们主要参考三个因素:**query**、**key**和**value**。\n",
- "\n",
- "- `query`:任务内容\n",
- "- `key`:索引/标签(帮助定位到答案)\n",
- "- `value`:答案\n",
- "\n",
- "在上面的例子中,如“情感分类”、“电影名字”、“中译英”等为`query`,每次对于任务内容的回答即为`value`。至于什么是`key`, 用一个比较直观的举例来说,每次登录视频网站搜索视频时,搜索的内容为`query`,搜索结果中显示的视频名称为`key`,它与任务内容相关,并可以引导我们至具体的视频内容(`value`)。\n",
- "\n",
- "
\n",
- "\n",
- "一般在文本翻译中,我们希望翻译后的句子的意思和原始句子相似,所以进行注意力分数计算时,`query`一般和目标序列,即翻译后的句子有关,`key`则与源序列,即翻译前的原始句子有关。"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "计算注意力分数,即为计算`query`与`key`的相似度。常用的计算注意力分数的方式有两种:`additive attention`和`scaled dot-product attention`,在这里我们主要介绍第二种方法。\n",
- "\n",
- "在几何角度,点积(dot product)表示一个向量在另一个向量方向上的投影。换句话说,从几何角度上解读,点积代表了某个向量中的多少是和另一个向量相似的。\n",
- "\n",
- "
\n",
- "\n",
- "> 图片来源: [Understanding the Dot Product](https://betterexplained.com/articles/vector-calculus-understanding-the-dot-product/) from BetterExplained\n",
- "\n",
- "将这个概念运用到当前的情境中,我们想要求`query`和`key`之间有多少是相似的,则需要计算`query`和`key`的点积。"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "同时,为了避免`query`($Q \\in R^{n\\times d_{model}}$)和`key`($K \\in R^{m\\times d_{model}}$)本身的“大小”影响到相似度的计算,我们需要在点乘后除以$\\sqrt{d_{model}}$。\n",
- "\n",
- "$$\\text{Attention Score}(Q, K)=\\frac{QK^T}{\\sqrt{d_{model}}}$$\n",
- "\n",
- "我们将该相似度的区间限制与0到1之间,并令其作用在`value`上。\n",
- "\n",
- "$$\\text{Attention}(Q, K, V) = \\text{softmax}\\left(\\frac{QK^T}{\\sqrt{d_{model}}}\\right)V$$\n",
- "\n",
- "在如下代码中,我们实现了scaled dot-product attention的计算, 调用类后,返回的是加权后的value(output)以及注意力权重(attn)。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [],
- "source": [
- "import mindspore\n",
- "from mindspore import nn\n",
- "from mindspore import ops\n",
- "from mindspore import Tensor\n",
- "from mindspore import dtype as mstype\n",
- "\n",
- "\n",
- "class ScaledDotProductAttention(nn.Cell):\n",
- " def __init__(self, dropout_p=0.):\n",
- " super().__init__()\n",
- " self.softmax = nn.Softmax()\n",
- " self.dropout = nn.Dropout(p=dropout_p)\n",
- " self.sqrt = ops.Sqrt()\n",
- "\n",
- "\n",
- " def construct(self, query, key, value, attn_mask=None):\n",
- " \"\"\"scaled dot product attention\"\"\"\n",
- "\n",
- " embed_size = query.shape[-1]\n",
- " scaling_factor = self.sqrt(Tensor(embed_size, mstype.float32))\n",
- " \n",
- "\n",
- " attn = ops.matmul(query, key.swapaxes(-2, -1) / scaling_factor)\n",
- "\n",
- "\n",
- " if attn_mask is not None:\n",
- " attn = attn.masked_fill(attn_mask, -1e9)\n",
- " \n",
- " attn = self.softmax(attn)\n",
- "\n",
- " attn = self.dropout(attn)\n",
- "\n",
- " output = ops.matmul(attn, value)\n",
- "\n",
- " return (output, attn)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(128, 8, 32, 64) (128, 8, 32, 32)\n"
- ]
- }
- ],
- "source": [
- "attention = ScaledDotProductAttention()\n",
- "q_s = k_s = v_s = ops.ones((128, 8, 32, 64), mindspore.float32)\n",
- "attn_mask = ops.ones((128, 8, 32, 32), mindspore.float32)\n",
- "attn_mask = mindspore.ops.gt(attn_mask, attn_mask)\n",
- "output, attn = attention(q_s, k_s, v_s, attn_mask)\n",
- "print(output.shape, attn.shape)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "在处理数据时,我们为了统一长度,会使用 <pad> 占位符补齐了一些稍短的文本。\n",
- "\n",
- "\"Hello world!\" --> <bos> hello world ! <eos> <pad> <pad>\n",
- "\n",
- "这些 <pad> 占位符没有任何意义,不应该参与注意力分数计算中。为此我们在注意力中加入了 padding 掩码,即识别输入序列中的 <pad> 占位符,保证计算时这些位置对应的注意力分数为0。\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "def get_attn_pad_mask(seq_q, seq_k, pad_idx):\n",
- " \"\"\"注意力掩码:识别序列中的占位符\n",
- "\n",
- " Args:\n",
- " seq_q (Tensor): query序列,shape = [batch size, query len]\n",
- " seq_k (Tensor): key序列,shape = [batch size, key len]\n",
- " pad_idx (Tensor): key序列占位符对应的数字索引\n",
- " \"\"\"\n",
- " batch_size, len_q = seq_q.shape\n",
- " batch_size, len_k = seq_k.shape\n",
- "\n",
- " pad_attn_mask = ops.equal(seq_k, pad_idx)\n",
- "\n",
- " pad_attn_mask = pad_attn_mask.expand_dims(1)\n",
- "\n",
- " pad_attn_mask = ops.broadcast_to(pad_attn_mask, (batch_size, len_q, len_k))\n",
- "\n",
- " return pad_attn_mask"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[[[False False True True]\n",
- "\n",
- " [False False True True]\n",
- "\n",
- " [False False True True]\n",
- "\n",
- " [False False True True]]]\n",
- "\n",
- "(1, 4) (1, 4, 4)\n"
- ]
- }
- ],
- "source": [
- "q = k = Tensor([[1, 1, 0, 0]], mstype.float32)\n",
- "pad_idx = 0\n",
- "mask = get_attn_pad_mask(q, k, pad_idx)\n",
- "print(mask)\n",
- "print(q.shape, mask.shape)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 自注意力机制(Self-Attention)\n",
- "\n",
- "自注意力机制中,我们关注句子本身,查看每个单词对于周边单词的重要性。这样可以很好地理清句子中的逻辑关系,如代词指代。\n",
- "\n",
- "举个例子,在'`The animal` didn't cross the street because `it` was too tired'这句话中,'it'指代句中的'The animal',所以自注意力会赋予'The'、'animal'更高的注意力分值。\n",
- "\n",
- "
\n",
- "\n",
- "> 图片来源: [The Illustrated Transformer](http://jalammar.github.io/illustrated-transformer/) by Jay Alammer"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "自注意力分数的计算还是遵循着上述的公式,只不过这里的`query`, `key`和`value`都变成了句子本身点乘各自权重。\n",
- "\n",
- "给定序列$X \\in \\mathbb{R}^{n \\times d_{model}}$,序列长度为$n$,维度为$d_{model}$。在计算自注意力时,$Q = W^QX, K = W^KX, V = W^VX$。\n",
- "\n",
- "$$\\text{Attention}(Q, K, V) = \\text{softmax}\\left(\\frac{QK^T}{\\sqrt{d_{model}}}\\right)V$$\n",
- "\n",
- "其中,序列中位置为$i$的词与位置为$j$的词之间的自注意力分数为:\n",
- "\n",
- "$$\\text{Attention}(Q, K, V)_{i,j} = \\frac{\\text{exp}\\left(\\frac{Q_iK_j^T}{\\sqrt{d_{model}}}\\right)}{\\sum_{k=1}^{n}\\text{exp}\\left(\\frac{Q_iK_k^T}{\\sqrt{d_{model}}}\\right)}V_j$$"
- ]
- },
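- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To make this concrete, here is a minimal sketch (added for illustration, not part of the original text): self-attention is just the scaled dot-product attention implemented above, with `query`, `key` and `value` all projected from the same input; the randomly initialized `nn.Dense` layers stand in for $W^Q$, $W^K$ and $W^V$."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Self-attention sketch: query, key and value are projections of the same input x\n",
- "x = ops.ones((1, 3, 4), mstype.float32) # [batch_size, seq_len, d_model]\n",
- "w_q, w_k, w_v = nn.Dense(4, 4), nn.Dense(4, 4), nn.Dense(4, 4)\n",
- "self_attn = ScaledDotProductAttention()\n",
- "output, attn = self_attn(w_q(x), w_k(x), w_v(x))\n",
- "print(output.shape, attn.shape) # (1, 3, 4) (1, 3, 3)"
- ]
- },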
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 多头注意力(Multi-Head Attention)\n",
- "\n",
- "
\n",
- "\n",
- "> 图片来源:Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need, 2017.\n",
- "\n",
- "多头注意力是注意力机制的扩展,它可以使模型通过不同的方式关注输入序列的不同部分,从而提升模型的训练效果。\n",
- "\n",
- "不同于之前一次计算整体输入的注意力分数,多头注意力是多次计算,每次计算输入序列中某一部分的注意力分数,最后再将结果进行整合。"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "多头注意力通过对输入的embedding乘以不同的权重参数$W^{Q}$、$W^{K}$和$W^{V}$,将其映射到多个小维度空间中,我们称之为“头”(head),每个头部会并行计算自己的自注意力分数。\n",
- "\n",
- "$$\\text{head}_i = \\text{Attention}(QW^Q_i, KW^K_i, VW^V_i) = \\text{softmax}\\left(\\frac{Q_iK_i^T}{\\sqrt{d_{k}}}\\right)V_i$$\n",
- "\n",
- "$W^Q_i \\in \\mathbb{R}^{d_{model}\\times d_{k}}$、$W^K_i \\in \\mathbb{R}^{d_{model}\\times d_{k}}$和$W^V_i \\in \\mathbb{R}^{d_{model}\\times d_{v}}$为可学习的权重参数。一般为了平衡计算成本,我们会取$d_k = d_v = d_{model} / n_{head}$。\n",
- "\n",
- "在获得多组自注意力分数后,我们将结果拼接到一起,得到多头注意力的最终输出。$W^O$为可学习的权重参数,用于将拼接后的多头注意力输出映射回原来的维度。\n",
- "\n",
- "$$\\text{MultiHead}(Q, K, V)=\\text{Concat}(\\text{head}_1, ..., \\text{head}_h)W^O$$\n",
- "\n",
- "简单来说,在多头注意力中,每个头部可以'解读'输入内容的不同方面,比如:捕捉全局依赖关系、关注特定语境下的词元、识别词和词之间的语法关系等。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [],
- "source": [
- "class MultiHeadAttention(nn.Cell):\n",
- " def __init__(self, d_model, d_k, n_heads, dropout_p=0.):\n",
- " super().__init__()\n",
- " self.n_heads = n_heads\n",
- " self.d_k = d_k\n",
- " self.W_Q = nn.Dense(d_model, d_k * n_heads)\n",
- " self.W_K = nn.Dense(d_model, d_k * n_heads)\n",
- " self.W_V = nn.Dense(d_model, d_k * n_heads)\n",
- " self.W_O = nn.Dense(n_heads * d_k, d_model)\n",
- " self.attention = ScaledDotProductAttention(dropout_p=dropout_p)\n",
- "\n",
- " def construct(self, query, key, value, attn_mask):\n",
- " \"\"\"\n",
- " query: [batch_size, len_q, d_model]\n",
- " key: [batch_size, len_k, d_model]\n",
- " value: [batch_size, len_k, d_model]\n",
- " attn_mask: [batch_size, seq_len, seq_len]\n",
- " \"\"\"\n",
- "\n",
- " batch_size = query.shape[0]\n",
- "\n",
- " q_s = self.W_Q(query).view(batch_size, -1, self.n_heads, self.d_k)\n",
- " k_s = self.W_K(key).view(batch_size, -1, self.n_heads, self.d_k)\n",
- " v_s = self.W_V(value).view(batch_size, -1, self.n_heads, self.d_k)\n",
- "\n",
- " q_s = q_s.transpose((0, 2, 1, 3))\n",
- " k_s = k_s.transpose((0, 2, 1, 3))\n",
- " v_s = v_s.transpose((0, 2, 1, 3))\n",
- "\n",
- " attn_mask = attn_mask.expand_dims(1)\n",
- " attn_mask = ops.tile(attn_mask, (1, self.n_heads, 1, 1))\n",
- "\n",
- " context, attn = self.attention(q_s, k_s, v_s, attn_mask)\n",
- "\n",
- " context = context.transpose((0, 2, 1, 3)).view((batch_size, -1, self.n_heads * self.d_k))\n",
- "\n",
- " output = self.W_O(context)\n",
- "\n",
- " return output, attn"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1, 2, 10) (1, 5, 2, 2)\n"
- ]
- }
- ],
- "source": [
- "dmodel, dk, nheads = 10, 2, 5\n",
- "q = k = v = ops.ones((1, 2, 10), mstype.float32)\n",
- "attn_mask = Tensor([False]).broadcast_to((1, 2, 2))\n",
- "multi_head_attn = MultiHeadAttention(dmodel, dk, nheads)\n",
- "output, attn = multi_head_attn(q, k, v, attn_mask)\n",
- "print(output.shape, attn.shape)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "# Transformer结构\n",
- "\n",
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## Transformer 结构\n",
- "\n",
- "Transformer同样是encoder-decoder的结构,只不过这里的“encoder”和“decoder”是由无数个同样结构的encoder层和decoder层堆叠组成。\n",
- "\n",
- "在进行机器翻译时,encoder解读源语句(被翻译的句子)的信息,并传输给decoder。decoder接收源语句信息后,结合当前输入(目前翻译的情况),预测下一个单词,直到生成完整的句子。\n",
- "\n",
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## Transformer 结构\n",
- "\n",
- "Transformer的具体结构如下图所示,在进入encoder或decoder前,源序列和目标序列需要经过一些“加工”。\n",
- "\n",
- "1. word embedding: 将序列转换为模型所能理解的词向量表示,其中包含了序列的**内容信息**。\n",
- "2. positional encoding:在内容信息的基础上添加**位置信息**。\n",
- "\n",
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 位置编码(Positional Encoding)\n",
- "\n",
- "Transformer模型不包含RNN,所以无法在模型中记录时序信息,这样会导致模型无法识别由顺序改变而产生的句子含义的改变,如“我爱我的小猫”和“我的小猫爱我”。\n",
- "\n",
- "为了弥补这个缺陷,我们选择在输入数据中额外添加表示位置信息的位置编码。\n",
- "\n",
- "位置编码$PE$的形状与经过word embedding后的输出$X$相同,对于索引为[pos, 2i]的元素,以及索引为[pos, 2i+1]的元素,位置编码的计算如下:\n",
- "\n",
- "$$PE_{(pos,2i)} = \\sin\\Bigg(\\frac{pos}{10000^{2i/d_{\\text{model}}}}\\Bigg)$$\n",
- "\n",
- "$$PE_{(pos,2i+1)} = \\cos\\Bigg(\\frac{pos}{10000^{2i/d_{\\text{model}}}}\\Bigg)$$\n",
- "\n",
- "在下面的代码中,我们实现了位置编码,输入经过word embedding后的结果$X$,输出添加位置信息后的结果$X + PE$。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [],
- "source": [
- "from mindspore import numpy as mnp\n",
- "\n",
- "class PositionalEncoding(nn.Cell):\n",
- " \"\"\"位置编码\"\"\"\n",
- "\n",
- " def __init__(self, d_model, dropout_p=0.1, max_len=100):\n",
- " super().__init__()\n",
- " self.dropout = nn.Dropout(p = dropout_p)\n",
- "\n",
- " self.pe = ops.Zeros()((max_len, d_model), mstype.float32)\n",
- "\n",
- " pos = mnp.arange(0, max_len, dtype=mstype.float32).view((-1, 1))\n",
- " angle = ops.pow(10000.0, mnp.arange(0, d_model, 2, dtype=mstype.float32)/d_model)\n",
- "\n",
- " self.pe[:, 0::2] = ops.sin(pos/angle)\n",
- " self.pe[:, 1::2] = ops.cos(pos/angle)\n",
- "\n",
- " def construct(self, x):\n",
- " batch_size = x.shape[0]\n",
- "\n",
- " pe = self.pe.expand_dims(0)\n",
- " pe = ops.broadcast_to(pe, (batch_size, -1, -1))\n",
- "\n",
- " x = x + pe[:, :x.shape[1], :]\n",
- " return self.dropout(x)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[[[0. 1. 0. 1. ]\n",
- "\n",
- " [0.841471 0.5403023 0.00999983 0.99995 ]]]\n"
- ]
- }
- ],
- "source": [
- "x = ops.Zeros()((1, 2, 4), mstype.float32)\n",
- "pe = PositionalEncoding(4)\n",
- "print(pe(x))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 编码器(Encoder)\n",
- "\n",
- "Transformer的Encoder负责处理输入的源序列,并将输入信息整合为一系列的上下文向量(context vector)输出。\n",
- "\n",
- "每个encoder层中存在两个子层:多头自注意力(multi-head self-attention)和基于位置的前馈神经网络(position-wise feed-forward network)。\n",
- "\n",
- "子层之间使用了残差连接(residual connection),并使用了层规范化(layer normalization)。二者统称为“Add & Norm”\n",
- "\n",
- "
"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "### 基于位置的前馈神经网络 (Position-Wise Feed-Forward Network)\n",
- "\n",
- "基于位置的前馈神经网络被用来对输入中的每个位置进行非线性变换。它由两个线性层组成,层与层之间需要经过ReLU激活函数。\n",
- "\n",
- "$$\\mathrm{FFN}(x) = \\mathrm{ReLU}(xW_1 + b_1)W_2 + b_2$$\n",
- "\n",
- "相比固定的ReLU函数,基于位置的前馈神经网络可以处理更加复杂的关系,并且由于前馈网络是基于位置的,可以捕获到不同位置的信息,并为每个位置提供不同的转换。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "class PoswiseFeedForward(nn.Cell):\n",
- " def __init__(self, d_ff, d_model, dropout_p=0.):\n",
- " super().__init__()\n",
- " self.linear1 = nn.Dense(d_model, d_ff)\n",
- " self.linear2 = nn.Dense(d_ff, d_model)\n",
- " self.dropout = nn.Dropout(p=dropout_p)\n",
- " self.relu = nn.ReLU()\n",
- "\n",
- " def construct(self, x):\n",
- " \"\"\"前馈神经网络\n",
- " x: [batch_size, seq_len, d_model]\n",
- " \"\"\"\n",
- " x = self.linear1(x)\n",
- " x = self.relu(x)\n",
- " x = self.dropout(x)\n",
- " output = self.linear2(x)\n",
- " return output"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1, 2, 4)\n"
- ]
- }
- ],
- "source": [
- "x = ops.ones((1, 2, 4), mstype.float32)\n",
- "ffn = PoswiseFeedForward(16, 4)\n",
- "print(ffn(x).shape)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## Add & Norm\n",
- "\n",
- "Add & Norm层本质上是残差连接后紧接了一个LayerNorm层。\n",
- "\n",
- "$\\text{Add\\&Norm}(x) = \\text{LayerNorm}(x + \\text{Sublayer}(x))$\n",
- "\n",
- "- Add:残差连接,帮助缓解网络退化问题,注意需要满足$x$与$\\text{SubLayer}(x)的形状一致$;\n",
- "- Norm:Layer Norm,层归一化,帮助模型更快地进行收敛;"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "class AddNorm(nn.Cell):\n",
- " def __init__(self, d_model, dropout_p=0.):\n",
- " super().__init__()\n",
- " self.layer_norm = nn.LayerNorm((d_model, ), epsilon=1e-5)\n",
- " self.dropout = nn.Dropout(p=dropout_p)\n",
- " \n",
- " def construct(self, x, residual):\n",
- " return self.layer_norm(self.dropout(x) + residual)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1, 2, 4)\n"
- ]
- }
- ],
- "source": [
- "x = ops.ones((1, 2, 4), mstype.float32)\n",
- "residual = ops.ones((1, 2, 4), mstype.float32)\n",
- "add_norm = AddNorm(4)\n",
- "print(add_norm(x, residual).shape)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "### EncoderLayer\n",
- "\n",
- "我们首先实现encoder中的一个层。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 14,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "class EncoderLayer(nn.Cell):\n",
- " def __init__(self, d_model, n_heads, d_ff, dropout_p=0.):\n",
- " super().__init__()\n",
- " d_k = d_model // n_heads\n",
- " if d_k * n_heads != d_model:\n",
- " raise ValueError(f\"The `d_model` {d_model} can not be divisible by `num_heads` {n_heads}.\")\n",
- " self.enc_self_attn = MultiHeadAttention(d_model, d_k, n_heads, dropout_p)\n",
- " self.pos_ffn = PoswiseFeedForward(d_ff, d_model, dropout_p)\n",
- " self.add_norm1 = AddNorm(d_model, dropout_p)\n",
- " self.add_norm2 = AddNorm(d_model, dropout_p)\n",
- " \n",
- " def construct(self, enc_inputs, enc_self_attn_mask):\n",
- " \"\"\"\n",
- " enc_inputs: [batch_size, src_len, d_model]\n",
- " enc_self_attn_mask: [batch_size, src_len, src_len]\n",
- " \"\"\"\n",
- " residual = enc_inputs\n",
- "\n",
- " enc_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs, enc_self_attn_mask)\n",
- "\n",
- " enc_outputs = self.add_norm1(enc_outputs, residual)\n",
- " residual = enc_outputs\n",
- "\n",
- " enc_outputs = self.pos_ffn(enc_outputs)\n",
- "\n",
- " enc_outputs = self.add_norm2(enc_outputs, residual)\n",
- "\n",
- " return enc_outputs, attn"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 15,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1, 2, 8) (1, 4, 2, 2)\n"
- ]
- }
- ],
- "source": [
- "x = ops.ones((1, 2, 8), mstype.float32)\n",
- "mask = Tensor([False]).broadcast_to((1, 2, 2))\n",
- "encoder_layer = EncoderLayer(8, 4, 16)\n",
- "output, attn = encoder_layer(x, mask)\n",
- "print(output.shape, attn.shape)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "### Encoder\n",
- "\n",
- "将上面实现的encoder层堆叠`n_layers`次,并添加wording embedding与positional encoding。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 16,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "class Encoder(nn.Cell):\n",
- " def __init__(self, src_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.):\n",
- " super().__init__()\n",
- " self.src_emb = nn.Embedding(src_vocab_size, d_model)\n",
- " self.pos_emb = PositionalEncoding(d_model, dropout_p)\n",
- " self.layers = nn.CellList([EncoderLayer(d_model, n_heads, d_ff, dropout_p) for _ in range(n_layers)])\n",
- " self.scaling_factor = ops.Sqrt()(Tensor(d_model, mstype.float32))\n",
- "\n",
- " \n",
- " def construct(self, enc_inputs, src_pad_idx):\n",
- " \"\"\"enc_inputs : [batch_size, src_len]\n",
- " \"\"\"\n",
- " enc_outputs = self.src_emb(enc_inputs.astype(mstype.int32))\n",
- " enc_outputs = self.pos_emb(enc_outputs * self.scaling_factor)\n",
- "\n",
- " enc_self_attn_mask = get_attn_pad_mask(enc_inputs, enc_inputs, src_pad_idx)\n",
- "\n",
- " enc_self_attns = []\n",
- " for layer in self.layers:\n",
- " enc_outputs, enc_self_attn = layer(enc_outputs, enc_self_attn_mask)\n",
- " enc_self_attns.append(enc_self_attn)\n",
- " return enc_outputs, enc_self_attns"
- ]
- },
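- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A quick shape check for the `Encoder`, added here in the spirit of the earlier test cells (the tiny hyperparameters and the pad index 1 are arbitrary choices for illustration):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "tiny_encoder = Encoder(src_vocab_size=16, d_model=8, n_heads=2, d_ff=32, n_layers=2)\n",
- "enc_inputs = Tensor([[2, 5, 6, 3, 1, 1]], mstype.int32) # one sequence padded with index 1\n",
- "enc_outputs, enc_self_attns = tiny_encoder(enc_inputs, 1) # src_pad_idx = 1\n",
- "print(enc_outputs.shape, len(enc_self_attns)) # (1, 6, 8) 2"
- ]
- },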
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 解码器 (Decoder)\n",
- "\n",
- "
\n",
- "\n",
- "解码器将编码器输出的上下文序列转换为目标序列的预测结果$\\hat{Y}$,该输出将在模型训练中与真实目标输出$Y$进行比较,计算损失。\n",
- "\n",
- "不同于编码器,每个Decoder层中包含两层多头注意力机制,并在最后多出一个线性层,输出对目标序列的预测结果。\n",
- "\n",
- "- 第一层:计算目标序列的注意力分数的**掩码多头自注意力**;\n",
- "- 第二层:用于计算上下文序列与目标序列对应关系,其中Decoder掩码多头注意力的输出作为query,Encoder的输出(上下文序列)作为key和value;"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "### 带掩码的多头注意力\n",
- "\n",
- "在处理目标序列的输入时,t时刻的模型只能“观察”直到t-1时刻的所有词元,后续的词语不应该一并输入Decoder中。\n",
- "\n",
- "为了保证在t时刻,只有t-1个词元作为输入参与多头注意力分数的计算,我们需要在第一个多头注意力中额外增加一个时间掩码,使目标序列中的词随时间发展逐个被暴露出来。\n",
- "\n",
- "该注意力掩码可通过三角矩阵实现,对角线以上的词元表示为不参与注意力计算的词元,标记为1。\n",
- "\n",
- "$$\\begin{matrix}\n",
- "0 & 1 & 1 & 1 & 1\\\\\n",
- "0 & 0 & 1 & 1 & 1\\\\\n",
- "0 & 0 & 0 & 1 & 1\\\\\n",
- "0 & 0 & 0 & 0 & 1\\\\\n",
- "0 & 0 & 0 & 0 & 0\\\\\n",
- "\\end{matrix}$$\n",
- "\n",
- "该掩码一般被称作subsequent mask。"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "最后,将subsequent mask和padding mask合并为一个整体的掩码,确保模型既不会注意到t时刻以后的词元,也不会关注为 <pad> 的词元。\n",
- "\n",
- "
"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 17,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [],
- "source": [
- "def get_attn_subsequent_mask(seq_q, seq_k):\n",
- " \"\"\"生成时间掩码,使decoder在第t时刻只能看到序列的前t-1个元素\n",
- " \n",
- " Args:\n",
- " seq_q (Tensor): query序列,shape = [batch size, len_q]\n",
- " seq_k (Tensor): key序列,shape = [batch size, len_k]\n",
- " \"\"\"\n",
- " batch_size, len_q = seq_q.shape\n",
- " batch_size, len_k = seq_k.shape\n",
- "\n",
- " ones = ops.ones((batch_size, len_q, len_k), mindspore.float32)\n",
- " subsequent_mask = mnp.triu(ones, k=1)\n",
- " return subsequent_mask"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 18,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "[[[0. 1. 1. 1.]\n",
- "\n",
- " [0. 0. 1. 1.]\n",
- "\n",
- " [0. 0. 0. 1.]\n",
- "\n",
- " [0. 0. 0. 0.]]]\n"
- ]
- }
- ],
- "source": [
- "q = k = ops.ones((1, 4), mstype.float32)\n",
- "mask = get_attn_subsequent_mask(q, k)\n",
- "print(mask)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "### Decoder Layer\n",
- "\n",
- "首先实现Decoder中的一个层。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 19,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "class DecoderLayer(nn.Cell):\n",
- " def __init__(self, d_model, n_heads, d_ff, dropout_p=0.):\n",
- " super().__init__()\n",
- " d_k = d_model // n_heads\n",
- " if d_k * n_heads != d_model:\n",
- " raise ValueError(f\"The `d_model` {d_model} can not be divisible by `num_heads` {n_heads}.\")\n",
- " self.dec_self_attn = MultiHeadAttention(d_model, d_k, n_heads, dropout_p)\n",
- " self.dec_enc_attn = MultiHeadAttention(d_model, d_k, n_heads, dropout_p)\n",
- " self.pos_ffn = PoswiseFeedForward(d_ff, d_model, dropout_p)\n",
- " self.add_norm1 = AddNorm(d_model, dropout_p)\n",
- " self.add_norm2 = AddNorm(d_model, dropout_p)\n",
- " self.add_norm3 = AddNorm(d_model, dropout_p)\n",
- " \n",
- " def construct(self, dec_inputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask):\n",
- " \"\"\"\n",
- " dec_inputs: [batch_size, trg_len, d_model]\n",
- " enc_outputs: [batch_size, src_len, d_model]\n",
- " dec_self_attn_mask: [batch_size, trg_len, trg_len]\n",
- " dec_enc_attn_mask: [batch_size, trg_len, src_len]\n",
- " \"\"\"\n",
- " residual = dec_inputs\n",
- "\n",
- " dec_outputs, dec_self_attn = self.dec_self_attn(dec_inputs, dec_inputs, dec_inputs, dec_self_attn_mask)\n",
- "\n",
- " dec_outputs = self.add_norm1(dec_outputs, residual)\n",
- " residual = dec_outputs\n",
- " \n",
- " dec_outputs, dec_enc_attn = self.dec_enc_attn(dec_outputs, enc_outputs, enc_outputs, dec_enc_attn_mask)\n",
- "\n",
- " dec_outputs = self.add_norm2(dec_outputs, residual)\n",
- " residual = dec_outputs\n",
- "\n",
- " dec_outputs = self.pos_ffn(dec_outputs)\n",
- "\n",
- " dec_outputs = self.add_norm3(dec_outputs, residual)\n",
- "\n",
- " return dec_outputs, dec_self_attn, dec_enc_attn"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 20,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1, 2, 4) (1, 1, 2, 2) (1, 1, 2, 2)\n"
- ]
- }
- ],
- "source": [
- "x = y = ops.ones((1, 2, 4), mstype.float32)\n",
- "mask1 = mask2 = Tensor([False]).broadcast_to((1, 2, 2))\n",
- "decoder_layer = DecoderLayer(4, 1, 16)\n",
- "output, attn1, attn2 = decoder_layer(x, y, mask1, mask2)\n",
- "print(output.shape, attn1.shape, attn2.shape)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "### Decoder\n",
- "\n",
- "将上面实现的DecoderLayer堆叠`n_layer`次,添加word embedding与positional encoding,以及最后的线性层。\n",
- "\n",
- "输出的`dec_outputs`为对目标序列的预测。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 21,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "class Decoder(nn.Cell):\n",
- " def __init__(self, trg_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.):\n",
- " super().__init__()\n",
- " self.trg_emb = nn.Embedding(trg_vocab_size, d_model)\n",
- " self.pos_emb = PositionalEncoding(d_model, dropout_p)\n",
- " self.layers = nn.CellList([DecoderLayer(d_model, n_heads, d_ff) for _ in range(n_layers)])\n",
- " self.projection = nn.Dense(d_model, trg_vocab_size)\n",
- " self.scaling_factor = ops.Sqrt()(Tensor(d_model, mstype.float32)) \n",
- " \n",
- " def construct(self, dec_inputs, enc_inputs, enc_outputs, src_pad_idx, trg_pad_idx):\n",
- " \"\"\"\n",
- " dec_inputs: [batch_size, trg_len]\n",
- " enc_inputs: [batch_size, src_len]\n",
- " enc_outputs: [batch_size, src_len, d_model]\n",
- " \"\"\"\n",
- " dec_outputs = self.trg_emb(dec_inputs.astype(mstype.int32))\n",
- " dec_outputs = self.pos_emb(dec_outputs * self.scaling_factor)\n",
- "\n",
- " dec_self_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs, trg_pad_idx)\n",
- " dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_inputs, dec_inputs)\n",
- " dec_self_attn_mask = ops.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0)\n",
- "\n",
- " dec_enc_attn_mask = get_attn_pad_mask(dec_inputs, enc_inputs, src_pad_idx)\n",
- "\n",
- " dec_self_attns, dec_enc_attns = [], []\n",
- " for layer in self.layers:\n",
- " dec_outputs, dec_self_attn, dec_enc_attn = layer(dec_outputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask)\n",
- " dec_self_attns.append(dec_self_attn)\n",
- " dec_enc_attns.append(dec_enc_attn)\n",
- "\n",
- " dec_outputs = self.projection(dec_outputs)\n",
- " return dec_outputs, dec_self_attns, dec_enc_attns"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## Transformer\n",
- "\n",
- "将实现的Encoder与Decoder组合起来。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 22,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "class Transformer(nn.Cell):\n",
- " def __init__(self, encoder, decoder):\n",
- " super().__init__()\n",
- " self.encoder = encoder\n",
- " self.decoder = decoder\n",
- " \n",
- " def construct(self, enc_inputs, dec_inputs, src_pad_idx, trg_pad_idx):\n",
- " \"\"\"\n",
- " enc_inputs: [batch_size, src_len]\n",
- " dec_inputs: [batch_size, trg_len]\n",
- " \"\"\"\n",
- " enc_outputs, enc_self_attns = self.encoder(enc_inputs, src_pad_idx)\n",
- "\n",
- " dec_outputs, dec_self_attns, dec_enc_attns = self.decoder(dec_inputs, enc_inputs, enc_outputs, src_pad_idx, trg_pad_idx)\n",
- "\n",
- " dec_logits = dec_outputs.view((-1, dec_outputs.shape[-1]))\n",
- "\n",
- " return dec_logits, enc_self_attns, dec_self_attns, dec_enc_attns\n",
- " "
- ]
- },
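- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A quick smoke test of the assembled model, added for illustration (the tiny vocabulary sizes, hyperparameters and the pad index 1 are arbitrary). The logits come back flattened to [batch_size * trg_len, trg_vocab_size]:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "tiny_model = Transformer(Encoder(16, 8, 2, 32, 2), Decoder(16, 8, 2, 32, 2))\n",
- "enc_inputs = Tensor([[2, 5, 6, 3, 1, 1]], mstype.int32) # [batch_size, src_len]\n",
- "dec_inputs = Tensor([[2, 7, 8, 3, 1]], mstype.int32) # [batch_size, trg_len]\n",
- "logits, _, _, _ = tiny_model(enc_inputs, dec_inputs, 1, 1)\n",
- "print(logits.shape) # (5, 16) = (batch_size * trg_len, trg_vocab_size)"
- ]
- },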
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "# 通过Transformer实现文本机器翻译\n",
- "\n",
- "全流程\n",
- "\n",
- "- 数据预处理: 将图像、文本等数据处理为可以计算的Tensor\n",
- "- 模型构建: 使用框架API, 搭建模型\n",
- "- 模型训练: 定义模型**训练逻辑**, 遍历**训练集**进行训练\n",
- "- 模型评估: 使用训练好的模型, 在**测试集**评估效果\n",
- "- 模型推理: 将训练好的模型部署, 输入新数据获得预测结果"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 数据准备\n",
- "\n",
- "我们本次使用的数据集为**Multi30K数据集**,它是一个大规模的图像-文本数据集,包含30K+图片,每张图片对应两类不同的文本描述:\n",
- "- 英语描述,及对应的德语翻译;\n",
- "- 五个独立的、非翻译而来的英语和德语描述,描述中包含的细节并不相同;\n",
- "\n",
- "因其收集的不同语言对于图片的描述相互独立,所以训练出的模型可以更好地适用于有噪声的多模态内容。\n",
- "\n",
- "
\n",
- "\n",
- "\n",
- "> 图片来源:Elliott, D., Frank, S., Sima’an, K., & Specia, L. (2016). Multi30K: Multilingual English-German Image Descriptions. CoRR, 1605.00459.\n",
- "\n",
- "在本次文本翻译任务中,德语是源语言(source languag),英语是目标语言(target language)。"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "### 数据下载模块\n",
- "\n",
- "使用`download`进行数据下载,并将`tar.gz`文件解压到指定文件夹。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 23,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Downloading data from https://modelscope.cn/api/v1/datasets/SelinaRR/Multi30K/repo?Revision=master&FilePath=Multi30K.zip (1 byte)\n",
- "\n",
- "\n",
- "\n",
- "file_sizes: 1.37MB [00:00, 20.2MB/s] \n",
- "\n",
- "Extracting zip file...\n",
- "\n",
- "Successfully downloaded / unzipped to ./\n",
- "\n",
- "========================================datasets in ./datasets/train/train.de========================================\n",
- "\n",
- "0 Zwei junge weiße Männer sind im Freien in der Nähe vieler Büsche.\n",
- "\n",
- "1 Mehrere Männer mit Schutzhelmen bedienen ein Antriebsradsystem.\n",
- "\n",
- "2 Ein kleines Mädchen klettert in ein Spielhaus aus Holz.\n",
- "\n",
- "3 Ein Mann in einem blauen Hemd steht auf einer Leiter und putzt ein Fenster.\n",
- "\n",
- "4 Zwei Männer stehen am Herd und bereiten Essen zu.\n",
- "\n",
- "========================================datasets in ./datasets/train/train.en========================================\n",
- "\n",
- "0 Two young, White males are outside near many bushes.\n",
- "\n",
- "1 Several men in hard hats are operating a giant pulley system.\n",
- "\n",
- "2 A little girl climbing into a wooden playhouse.\n",
- "\n",
- "3 A man in a blue shirt is standing on a ladder cleaning a window.\n",
- "\n",
- "4 Two men are at the stove preparing food.\n"
- ]
- }
- ],
- "source": [
- "from download import download\n",
- "import re\n",
- "\n",
- "url = \"https://modelscope.cn/api/v1/datasets/SelinaRR/Multi30K/repo?Revision=master&FilePath=Multi30K.zip\"\n",
- "\n",
- "download(url, './', kind='zip', replace=True)\n",
- "\n",
- "datasets_path = './datasets/'\n",
- "train_path = datasets_path + 'train/'\n",
- "valid_path = datasets_path + 'valid/'\n",
- "test_path = datasets_path + 'test/'\n",
- "\n",
- "def print_data(data_file_path, print_n=5):\n",
- " print(\"=\" * 40 + \"datasets in {}\".format(data_file_path) + \"=\" * 40)\n",
- " with open(data_file_path, 'r', encoding='utf-8') as en_file:\n",
- " en = en_file.readlines()[:print_n]\n",
- " for index, seq in enumerate(en):\n",
- " print(index, seq.replace('\\n', ''))\n",
- "\n",
- "\n",
- "print_data(train_path + 'train.de')\n",
- "print_data(train_path + 'train.en')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "### 数据预处理\n",
- "\n",
- "在使用数据进行模型训练等操作时,我们需要对数据进行预处理,流程如下:\n",
- "\n",
- "1. 加载数据集;\n",
- "2. 构建词典;\n",
- "3. 创建数据迭代器;"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "#### 数据加载器\n",
- "\n",
- "加载数据集,并进行分词,即将句子拆解为单独的词元(token,可以为字符或者单词)。一般在机器翻译类任务中,我们习惯进行单词级词元化,即每个词元要么为一个单词,要么为一个标点符号。同一个单词,不论首字母是否大写,都应该对应同一个词元,故在分词前,我们需统一将单词转换为小写。\n",
- "\n",
- "\n",
- "\"Hello world!\" --> [\"hello\", \"world\", \"!\"]\n",
- "\n",
- "\n",
- "接下来,我们创建数据加载器`Multi30K`。后期调用该类进行遍历时,每次返回当前源语言(德语)与目标语言(英语)文本描述的词元列表。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 24,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [],
- "source": [
- "import os\n",
- "\n",
- "class Multi30K():\n",
- " \"\"\"Multi30K数据集加载器\n",
- " \n",
- " 加载Multi30K数据集并处理为一个Python迭代对象。\n",
- " \n",
- " \"\"\"\n",
- " def __init__(self, path):\n",
- " self.data = self._load(path)\n",
- " \n",
- " def _load(self, path):\n",
- " def tokenize(text):\n",
- " text = text.rstrip()\n",
- " return [tok.lower() for tok in re.findall(r'\\w+|[^\\w\\s]', text)]\n",
- " \n",
- " members = {i.split('.')[-1]: i for i in os.listdir(path)}\n",
- " de_path = os.path.join(path, members['de'])\n",
- " en_path = os.path.join(path, members['en'])\n",
- " with open(de_path, 'r', encoding='utf-8') as de_file:\n",
- " de = de_file.readlines()[:-1]\n",
- " de = [tokenize(i) for i in de]\n",
- " with open(en_path, 'r', encoding='utf-8') as en_file:\n",
- " en = en_file.readlines()[:-1]\n",
- " en = [tokenize(i) for i in en]\n",
- "\n",
- " return list(zip(de, en))\n",
- " \n",
- " def __getitem__(self, idx):\n",
- " return self.data[idx]\n",
- " \n",
- " def __len__(self):\n",
- " return len(self.data)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 25,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "train_dataset, valid_dataset, test_dataset = Multi30K(train_path), Multi30K(valid_path), Multi30K(test_path)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "对解压和分词结果进行测试,打印测试数据集第一组英德语文本,可以看到每一个单词和标点符号已经被单独分离出来。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 26,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "de = ['ein', 'mann', 'mit', 'einem', 'orangefarbenen', 'hut', ',', 'der', 'etwas', 'anstarrt', '.']\n",
- "\n",
- "en = ['a', 'man', 'in', 'an', 'orange', 'hat', 'starring', 'at', 'something', '.']\n"
- ]
- }
- ],
- "source": [
- "for de, en in test_dataset:\n",
- " print(f'de = {de}')\n",
- " print(f'en = {en}')\n",
- " break"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "#### 词典\n",
- "\n",
- "将每个词元映射到从0开始的数字索引中(为节约存储空间,可过滤掉词频低的词元),词元和数字索引所构成的集合叫做词典(vocabulary)。\n",
- "\n",
- "以上述“Hello world!”为例,该序列组成的词典为:\n",
- "\n",
- "\n",
- "{\"<unk>\": 0, \"<pad>\": 1, \"<bos>\": 2, \"<eos>\": 3, \"hello\": 4, \"world\": 5, \"!\": 6}\n",
- "\n",
- "\n",
- "在构建词典中,我们使用了4个特殊词元。\n",
- "\n",
- "- <unk>:未知词元(unknown),将出现次数少于一定频率的单词统一判定为未知词元;\n",
- "- <bos>:起始词元(begin of sentence),用来标注一个句子的开始;\n",
- "- <eos>:结束词元(end of sentence),用来标注一个句子的结束;\n",
- "- <pad>:填充词元(padding),当句子长度不够时将句子填充至统一长度;\n",
- "\n",
- "通过`Vocab`创建词典后,我们可以实现词元与数字索引之间的互相转换。我们可以通过调用`enocde`函数,返回输入词元或者词元序列对应的数字索引或数字索引序列,反之亦然,我们同样可以通过调用`decode`函数,返回输入数字索引或数字索引序列对应的词元或词元序列。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 27,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [],
- "source": [
- "class Vocab:\n",
- " \"\"\"通过词频字典,构建词典\"\"\"\n",
- "\n",
- " special_tokens = ['', '', '', '']\n",
- "\n",
- " def __init__(self, word_count_dict, min_freq=1):\n",
- " self.word2idx = {}\n",
- " for idx, tok in enumerate(self.special_tokens):\n",
- " self.word2idx[tok] = idx\n",
- "\n",
- " filted_dict = {\n",
- " w: c\n",
- " for w, c in word_count_dict.items() if c >= min_freq\n",
- " }\n",
- " for w, _ in filted_dict.items():\n",
- " self.word2idx[w] = len(self.word2idx)\n",
- "\n",
- " self.idx2word = {idx: word for word, idx in self.word2idx.items()}\n",
- "\n",
- " self.bos_idx = self.word2idx['']\n",
- " self.eos_idx = self.word2idx['']\n",
- " self.pad_idx = self.word2idx['']\n",
- " self.unk_idx = self.word2idx['']\n",
- "\n",
- " def _word2idx(self, word):\n",
- " \"\"\"单词映射至数字索引\"\"\"\n",
- " if word not in self.word2idx:\n",
- " return self.unk_idx\n",
- " return self.word2idx[word]\n",
- "\n",
- " def _idx2word(self, idx):\n",
- " \"\"\"数字索引映射至单词\"\"\"\n",
- " if idx not in self.idx2word:\n",
- " raise ValueError('input index is not in vocabulary.')\n",
- " return self.idx2word[idx]\n",
- "\n",
- " def encode(self, word_or_list):\n",
- " \"\"\"将单个单词或单词数组映射至单个数字索引或数字索引数组\"\"\"\n",
- " if isinstance(word_or_list, list):\n",
- " return [self._word2idx(i) for i in word_or_list]\n",
- " return self._word2idx(word_or_list)\n",
- "\n",
- " def decode(self, idx_or_list):\n",
- " \"\"\"将单个数字索引或数字索引数组映射至单个单词或单词数组\"\"\"\n",
- " if isinstance(idx_or_list, list):\n",
- " return [self._idx2word(i) for i in idx_or_list]\n",
- " return self._idx2word(idx_or_list)\n",
- "\n",
- " def __len__(self):\n",
- " return len(self.word2idx)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "通过自定义词频字典进行测试,我们可以看到词典已去除词频少于2的词元c,并加入了默认的四个特殊占位符,故词典整体长度为:4 - 1 + 4 = 7"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 28,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "7"
- ]
- },
- "execution_count": 28,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "word_count = {'a':20, 'b':10, 'c':1, 'd':2}\n",
- "\n",
- "vocab = Vocab(word_count, min_freq=2)\n",
- "len(vocab)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "使用`collections`中的`Counter`和`OrderedDict`统计英/德语每个单词在整体文本中出现的频率。构建词频字典,然后再将词频字典转为词典。其中,收录所有源语言(德语)词元的词典为`de_vocab`,收录所有目标语言(英语)词元的词典为`en_vocab`。\n",
- "\n",
- "在分配数字索引时有一个小技巧:常用的词元对应数值较小的索引,这样可以节约空间。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 29,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "from collections import Counter, OrderedDict\n",
- "\n",
- "def build_vocab(dataset):\n",
- " de_words, en_words = [], []\n",
- " for de, en in dataset:\n",
- " de_words.extend(de)\n",
- " en_words.extend(en)\n",
- "\n",
- " de_count_dict = OrderedDict(sorted(Counter(de_words).items(), key=lambda t: t[1], reverse=True))\n",
- " en_count_dict = OrderedDict(sorted(Counter(en_words).items(), key=lambda t: t[1], reverse=True))\n",
- "\n",
- " return Vocab(de_count_dict, min_freq=2), Vocab(en_count_dict, min_freq=2)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 30,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Unique tokens in de vocabulary: 7882\n"
- ]
- }
- ],
- "source": [
- "de_vocab, en_vocab = build_vocab(train_dataset)\n",
- "print('Unique tokens in de vocabulary:', len(de_vocab))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "#### 数据迭代器\n",
- "\n",
- "数据预处理的最后一步是创建数据迭代器。截至目前,我们已经通过数据加载器`Multi30K`将源语言(德语)与目标语言(英语)的文本描述转换为词元序列,并构建了词元与数字索引一一对应的词典,接下来,需要将词元序列转换为数字索引序列。\n",
- "\n",
- "还是以“Hello world!”为例,我们逐步演示数据迭代器中的操作\n",
- "\n",
- "1. 我们将表示开始和结束的特殊词元 <bos> 和 <eos> 分别添加在每个词元序列的句首和句尾。\n",
- "\n",
- "\n",
- "[\"hello\", \"world\", \"!\"] --> [\"<bos>\", \"hello\", \"world\", \"!\", \"<eos>\"]\n",
- "\n",
- "\n",
- "2. 统一序列长度(超出长度的进行截断,未达到长度的通过填充 <pad> 进行补齐),同时记录序列的有效长度。此处假定统一的长度为7。\n",
- "\n",
- "\n",
- "[\"<bos>\", \"hello\", \"world\", \"!\", \"<eos>\"] --> [\"\", \"hello\", \"world\", \"!\", \"<eos>\", \"<pad>\", \"<pad>\"], valid length = 5\n",
- "\n",
- "\n",
- "3. 最后,对文本序列进行批处理。对于每个batch中的序列,通过调用词典中的`encode`为序列中的所有词元找到其对应的数字索引,将结果以`Tensor`的形式返回。\n",
- "\n",
- "\n",
- "[\"<bos>\", \"hello\", \"world\", \"!\", \"<eos>\", \"<pad>\", \"<pad>\"] --> [2, 4, 5, 6, 3, 1, 1] --> tensor\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 31,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [],
- "source": [
- "import mindspore\n",
- "\n",
- "class Iterator():\n",
- " \"\"\"创建数据迭代器\"\"\"\n",
- " def __init__(self, dataset, de_vocab, en_vocab, batch_size, max_len=32, drop_reminder=False):\n",
- " self.dataset = dataset\n",
- " self.de_vocab = de_vocab\n",
- " self.en_vocab = en_vocab\n",
- "\n",
- " self.batch_size = batch_size\n",
- " self.max_len = max_len\n",
- " self.drop_reminder = drop_reminder\n",
- "\n",
- " length = len(self.dataset) // batch_size\n",
- " self.len = length if drop_reminder else length + 1 # 批量数量\n",
- "\n",
- " def __call__(self):\n",
- " def pad(idx_list, vocab, max_len):\n",
- " \"\"\"统一序列长度,并记录有效长度\"\"\"\n",
- " idx_pad_list, idx_len = [], []\n",
- " for i in idx_list:\n",
- " if len(i) > max_len - 2:\n",
- " idx_pad_list.append(\n",
- " [vocab.bos_idx] + i[:max_len-2] + [vocab.eos_idx]\n",
- " )\n",
- " idx_len.append(max_len)\n",
- " else:\n",
- " idx_pad_list.append(\n",
- " [vocab.bos_idx] + i + [vocab.eos_idx] + [vocab.pad_idx] * (max_len - len(i) - 2)\n",
- " )\n",
- " idx_len.append(len(i) + 2)\n",
- " return idx_pad_list, idx_len\n",
- "\n",
- " def sort_by_length(src, trg):\n",
- " \"\"\"对德/英语的字段长度进行排序\"\"\"\n",
- " data = zip(src, trg)\n",
- " data = sorted(data, key=lambda t: len(t[0]), reverse=True)\n",
- " return zip(*list(data))\n",
- "\n",
- " def encode_and_pad(batch_data, max_len):\n",
- " \"\"\"将批量中的文本数据转换为数字索引,并统一每个序列的长度\"\"\"\n",
- " src_data, trg_data = zip(*batch_data)\n",
- " src_idx = [self.de_vocab.encode(i) for i in src_data]\n",
- " trg_idx = [self.en_vocab.encode(i) for i in trg_data]\n",
- "\n",
- " src_idx, trg_idx = sort_by_length(src_idx, trg_idx)\n",
- " src_idx_pad, src_len = pad(src_idx, de_vocab, max_len)\n",
- " trg_idx_pad, _ = pad(trg_idx, en_vocab, max_len)\n",
- "\n",
- " return src_idx_pad, src_len, trg_idx_pad\n",
- "\n",
- " for i in range(self.len):\n",
- " if i == self.len - 1 and not self.drop_reminder:\n",
- " batch_data = self.dataset[i * self.batch_size:]\n",
- " else:\n",
- " batch_data = self.dataset[i * self.batch_size: (i+1) * self.batch_size]\n",
- "\n",
- " src_idx, src_len, trg_idx = encode_and_pad(batch_data, self.max_len)\n",
- " yield mindspore.Tensor(src_idx, mindspore.int32), \\\n",
- " mindspore.Tensor(src_len, mindspore.int32), \\\n",
- " mindspore.Tensor(trg_idx, mindspore.int32)\n",
- "\n",
- " def __len__(self):\n",
- " return self.len"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 32,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "src_idx.shape:(128, 32)\n",
- "\n",
- "[[ 2 5 13 ... 1 1 1]\n",
- "\n",
- " [ 2 5 13 ... 1 1 1]\n",
- "\n",
- " [ 2 5 13 ... 1 1 1]\n",
- "\n",
- " ...\n",
- "\n",
- " [ 2 5 52 ... 1 1 1]\n",
- "\n",
- " [ 2 8 37 ... 1 1 1]\n",
- "\n",
- " [ 2 5 33 ... 1 1 1]]\n",
- "\n",
- "src_len.shape:(128,)\n",
- "\n",
- "[27 25 24 24 23 23 23 23 22 22 22 21 21 21 21 21 20 20 20 20 20 19 19 19\n",
- "\n",
- " 18 18 18 18 18 18 18 18 17 17 17 17 17 17 17 17 17 17 16 16 16 16 16 16\n",
- "\n",
- " 16 16 16 16 15 15 15 15 15 15 15 15 15 15 15 14 14 14 14 14 14 14 14 14\n",
- "\n",
- " 14 14 14 14 13 13 13 13 13 13 13 13 13 12 12 12 12 12 12 12 12 12 12 12\n",
- "\n",
- " 12 12 12 12 12 12 12 12 11 11 11 11 11 11 11 11 11 11 10 10 10 10 10 10\n",
- "\n",
- " 10 9 9 9 9 9 9 8]\n",
- "\n",
- "trg_idx.shape:(128, 32)\n",
- "\n",
- "[[ 2 4 2243 ... 1 1 1]\n",
- "\n",
- " [ 2 4 9 ... 1 1 1]\n",
- "\n",
- " [ 2 4 9 ... 1 1 1]\n",
- "\n",
- " ...\n",
- "\n",
- " [ 2 4 55 ... 1 1 1]\n",
- "\n",
- " [ 2 4 38 ... 1 1 1]\n",
- "\n",
- " [ 2 4 35 ... 1 1 1]]\n"
- ]
- }
- ],
- "source": [
- "train_iterator = Iterator(train_dataset, de_vocab, en_vocab, batch_size=128, max_len=32, drop_reminder=True)\n",
- "valid_iterator = Iterator(valid_dataset, de_vocab, en_vocab, batch_size=128, max_len=32, drop_reminder=False)\n",
- "test_iterator = Iterator(test_dataset, de_vocab, en_vocab, batch_size=1, max_len=32, drop_reminder=False)\n",
- "\n",
- "\n",
- "for src_idx, src_len, trg_idx in train_iterator():\n",
- " print(f'src_idx.shape:{src_idx.shape}\\n{src_idx}\\nsrc_len.shape:{src_len.shape}\\n{src_len}\\ntrg_idx.shape:{trg_idx.shape}\\n{trg_idx}')\n",
- " break"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 模型构建\n",
- "\n",
- "定义超参数,实例化模型。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 33,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "src_vocab_size = len(de_vocab)\n",
- "trg_vocab_size = len(en_vocab)\n",
- "src_pad_idx = de_vocab.pad_idx\n",
- "trg_pad_idx = en_vocab.pad_idx\n",
- "\n",
- "d_model = 512\n",
- "d_ff = 2048\n",
- "n_layers = 6\n",
- "n_heads = 8\n",
- "\n",
- "encoder = Encoder(src_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.1)\n",
- "decoder = Decoder(trg_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.1)\n",
- "model = Transformer(encoder, decoder)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 模型训练 & 模型评估"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "source": [
- "定义损失函数与优化器。\n",
- "\n",
- "- 损失函数:定义如何计算模型输出(logits)与目标(targets)之间的误差,这里可以使用交叉熵损失(CrossEntropyLoss)\n",
- "- 优化器:MindSpore将模型优化算法的实现称为**优化器**。优化器内部定义了模型的参数优化过程(即梯度如何更新至模型参数),所有优化逻辑都封装在优化器对象中。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 34,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "loss_fn = nn.CrossEntropyLoss(ignore_index=trg_pad_idx)\n",
- "optimizer = nn.Adam(model.trainable_params(), learning_rate=0.0001)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "### 模型训练逻辑\n",
- "\n",
- "MindSpore在模型训练部分使用了函数式编程(FP)。\n",
- "\n",
- "$$\\text{构造函数}\\rightarrow \\text{函数变换} \\rightarrow \\text{函数调用}$$\n",
- "\n",
- "
\n",
- "\n",
- "1. Network+loss function直接构造正向函数\n",
- "2. 函数变换,获得梯度计算(反向传播)函数\n",
- "3. 构造训练过程函数\n",
- "4. 调用函数进行训练"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "定义前向网络计算逻辑。\n",
- "\n",
- "在训练过程中,表示句子结尾的 <eos> 占位符应是被模型预测出来,而不是作为模型的输入,所以在处理 Decoder 的输入时,我们需要移除目标序列最末的 <eos> 占位符。\n",
- "\n",
- "trg = [, x_1, x_2, ..., x_n, ]\n",
- "\n",
- "trg[:-1] = [, x_1, x_2, ..., x_n]\n",
- "\n",
- "\n",
- "其中,$x_i$代表目标序列中第i个表示实际内容的词元。\n",
- "\n",
- "我们期望最终的输出包含表示句末的 <eos> ,不包含表示句首的 <bos>,所以在计算损失时,需要同样去除的目标序列的句首 <bos> 占位符,再进行比较。\n",
- "\n",
- "output = [y_1, y_2, ..., y_n, <eos>]\n",
- "\n",
- "trg[1:] = [x_1, x_2, ..., x_n, <bos>]\n",
- "\n",
- "其中,$y_i$表示预测的第i个实际内容词元。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 35,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [],
- "source": [
- "def forward(enc_inputs, dec_inputs):\n",
- " \"\"\"前向网络\n",
- " enc_inputs: [batch_size, src_len]\n",
- " dec_inputs: [batch_size, trg_len]\n",
- " \"\"\"\n",
- " logits, _, _, _ = model(enc_inputs, dec_inputs[:, :-1], src_pad_idx, trg_pad_idx)\n",
- "\n",
- " targets = dec_inputs[:, 1:].view(-1)\n",
- " loss = loss_fn(logits, targets)\n",
- "\n",
- " return loss"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "定义梯度计算函数。\n",
- "\n",
- "为了优化模型参数,需要求参数对loss的导数。我们调用`mindspore.ops.value_and_grad`函数,来获得function的微分函数。\n",
- "\n",
- "
\n",
- "\n",
- "常用到的参数有三种:\n",
- "\n",
- "- fn:待求导的函数;\n",
- "- grad_position:指定求导输入位置的索引;\n",
- "- weights:指定求导的参数;"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "由于使用Cell封装神经网络模型,模型参数为Cell的内部属性,此时我们不需要使用`grad_position`指定对函数输入求导,因此将其配置为None。对模型参数求导时,我们使用weights参数,使用`model.trainable_params()`方法从Cell中取出可以求导的参数。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 36,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "grad_fn = mindspore.value_and_grad(forward, None, optimizer.parameters)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "定义训练一个step的逻辑。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 37,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "def train_step(enc_inputs, dec_inputs):\n",
- " loss, grads = grad_fn(enc_inputs, dec_inputs)\n",
- " optimizer(grads)\n",
- " return loss"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "定义整体训练逻辑。\n",
- "\n",
- "在训练中,模型会以最小化损失为目标更新模型权重,故模型状态需设置为训练`model.set_train(True)`。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 38,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "from tqdm import tqdm\n",
- "\n",
- "def train(iterator, epoch=0):\n",
- " model.set_train(True)\n",
- " num_batches = len(iterator)\n",
- " total_loss = 0\n",
- " total_steps = 0\n",
- "\n",
- " with tqdm(total=num_batches) as t:\n",
- " t.set_description(f'Epoch: {epoch}')\n",
- " for src, src_len, trg in iterator():\n",
- " loss = train_step(src, trg)\n",
- " total_loss += loss.asnumpy()\n",
- " total_steps += 1\n",
- " curr_loss = total_loss / total_steps\n",
- " t.set_postfix({'loss': f'{curr_loss:.2f}'})\n",
- " t.update(1)\n",
- "\n",
- " return total_loss / total_steps"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "定义模型评估逻辑。\n",
- "\n",
- "在评估中,仅需正向计算loss,无需更新模型参数,故模型状态需设置为训练`model.set_train(False)`。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 39,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "def evaluate(iterator):\n",
- " model.set_train(False)\n",
- " num_batches = len(iterator)\n",
- " total_loss = 0\n",
- " total_steps = 0\n",
- "\n",
- " with tqdm(total=num_batches) as t:\n",
- " for src, _, trg in iterator():\n",
- " loss = forward(src, trg)\n",
- " total_loss += loss.asnumpy()\n",
- " total_steps += 1\n",
- " curr_loss = total_loss / total_steps\n",
- " t.set_postfix({'loss': f'{curr_loss:.2f}'})\n",
- " t.update(1)\n",
- "\n",
- " return total_loss / total_steps"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "#### 模型训练\n",
- "\n",
- "数据集遍历迭代,一次完整的数据集遍历成为一个epoch。我们逐个epoch打印训练的损失值和评估精度,并通过`save_checkpoint`保存评估精度最高的ckpt文件(transformer.ckpt)到home_path/.mindspore_examples/transformer.ckpt。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 45,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Epoch: 0: 100%|██████████| 226/226 [02:34<00:00, 1.46it/s, loss=2.88]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.88it/s, loss=2.39]\n",
- "\n",
- "Epoch: 1: 100%|██████████| 226/226 [02:35<00:00, 1.46it/s, loss=2.25]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.88it/s, loss=2.06]\n",
- "\n",
- "Epoch: 2: 100%|██████████| 226/226 [02:35<00:00, 1.46it/s, loss=1.89]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.88it/s, loss=1.86]\n",
- "\n",
- "Epoch: 3: 100%|██████████| 226/226 [02:35<00:00, 1.46it/s, loss=1.65]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.88it/s, loss=1.75]\n",
- "\n",
- "Epoch: 4: 100%|██████████| 226/226 [02:35<00:00, 1.46it/s, loss=1.46]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.87it/s, loss=1.68]\n",
- "\n",
- "Epoch: 5: 100%|██████████| 226/226 [02:35<00:00, 1.46it/s, loss=1.29]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.88it/s, loss=1.66]\n",
- "\n",
- "Epoch: 6: 100%|██████████| 226/226 [02:35<00:00, 1.46it/s, loss=1.16]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.88it/s, loss=1.63]\n",
- "\n",
- "Epoch: 7: 100%|██████████| 226/226 [02:35<00:00, 1.46it/s, loss=1.04]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.88it/s, loss=1.61]\n",
- "\n",
- "Epoch: 8: 100%|██████████| 226/226 [02:35<00:00, 1.46it/s, loss=0.94]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.88it/s, loss=1.59]\n",
- "\n",
- "Epoch: 9: 100%|██████████| 226/226 [02:35<00:00, 1.46it/s, loss=0.84]\n",
- "\n",
- "100%|██████████| 8/8 [00:04<00:00, 1.88it/s, loss=1.63]\n"
- ]
- }
- ],
- "source": [
- "from mindspore import save_checkpoint\n",
- "\n",
- "num_epochs = 10\n",
- "best_valid_loss = float('inf')\n",
- "ckpt_file_name = './transformer.ckpt'\n",
- "\n",
- "\n",
- "for i in range(num_epochs):\n",
- " train_loss = train(train_iterator, i)\n",
- " valid_loss = evaluate(valid_iterator)\n",
- "\n",
- " if valid_loss < best_valid_loss:\n",
- " best_valid_loss = valid_loss\n",
- " save_checkpoint(model, ckpt_file_name)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## 模型推理\n",
- "\n",
- "首先,通过`load_checkpoint`与`load_param_into_net`将训练好的模型参数加载入新实例化的模型中。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 46,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "([], [])"
- ]
- },
- "execution_count": 46,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "from mindspore import load_checkpoint, load_param_into_net\n",
- "\n",
- "encoder = Encoder(src_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.1)\n",
- "decoder = Decoder(trg_vocab_size, d_model, n_heads, d_ff, n_layers, dropout_p=0.1)\n",
- "new_model = Transformer(encoder, decoder)\n",
- "\n",
- "param_dict = load_checkpoint(ckpt_file_name)\n",
- "load_param_into_net(new_model, param_dict)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "推理过程中无需对模型参数进行更新,所以这里`model.set_train(False)`。\n",
- "\n",
- "我们输入一个德文语句,期望可以返回翻译好的英文语句。\n",
- "\n",
- "首先通过Encoder提取德文序列中的特征信息,并将其传输至Decoder。\n",
- "\n",
- "Decoder最开始的输入为起始占位符 <bos>,每次会根据输入预测下一个出现的单词,并对输入进行更新,直到预测出终止占位符 <eos> 。\n",
- "\n",
- "
\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 47,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [],
- "source": [
- "def inference(sentence, max_len=32):\n",
- " \"\"\"模型推理:输入一个德语句子,输出翻译后的英文句子\n",
- " enc_inputs: [batch_size(1), src_len]\n",
- " \"\"\"\n",
- " new_model.set_train(False)\n",
- " \n",
- " # 对输入句子进行分词\n",
- " if isinstance(sentence, str):\n",
- " tokens = [tok.lower() for tok in re.findall(r'\\w+|[^\\w\\s]', sentence.rstrip())]\n",
- " else:\n",
- " tokens = [token.lower() for token in sentence]\n",
- " \n",
- " # 补充起始、终止占位符,统一序列长度\n",
- " if len(tokens) > max_len - 2:\n",
- " src_len = max_len\n",
- " tokens = [''] + tokens[:max_len - 2] + ['']\n",
- " else:\n",
- " src_len = len(tokens) + 2\n",
- " tokens = [''] + tokens + [''] + [''] * (max_len - src_len)\n",
- " \n",
- " # 将德语单词转换为数字索引,并进一步转换为tensor\n",
- " # enc_inputs: [1, src_len]\n",
- " indexes = de_vocab.encode(tokens)\n",
- " enc_inputs = Tensor(indexes, mstype.float32).expand_dims(0)\n",
- " \n",
- " # 将输入送入encoder,获取信息\n",
- " enc_outputs, _ = new_model.encoder(enc_inputs, src_pad_idx)\n",
- "\n",
- " dec_inputs = Tensor([[en_vocab.bos_idx]], mstype.float32)\n",
- " \n",
- " # 初始化decoder输入,此时仅有句首占位符\n",
- " # dec_inputs: [1, 1]\n",
- " max_len = enc_inputs.shape[1]\n",
- " for _ in range(max_len):\n",
- " dec_outputs, _, _ = new_model.decoder(dec_inputs, enc_inputs, enc_outputs, src_pad_idx, trg_pad_idx)\n",
- " dec_logits = dec_outputs.view((-1, dec_outputs.shape[-1]))\n",
- " \n",
- " # 找到下一个词的概率分布,并输出预测\n",
- " dec_logits = dec_logits[-1, :]\n",
- " pred = dec_logits.argmax(axis=0).expand_dims(0).expand_dims(0)\n",
- " pred = pred.astype(mstype.float32)\n",
- " # 更新dec_inputs\n",
- " dec_inputs = ops.concat((dec_inputs, pred), axis=1)\n",
- " # 如果出现,则终止循环\n",
- " if int(pred.asnumpy()[0,0]) == en_vocab.eos_idx:\n",
- " break\n",
- " # 将数字索引转换为英文单词\n",
- " trg_indexes = [int(i) for i in dec_inputs.view(-1).asnumpy()]\n",
- " eos_idx = trg_indexes.index(en_vocab.eos_idx) if en_vocab.eos_idx in trg_indexes else -1\n",
- " trg_tokens = en_vocab.decode(trg_indexes[1:eos_idx])\n",
- "\n",
- " return trg_tokens"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "以测试数据集中的第一组语句为例,进行测试。"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 48,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "src = ['ein', 'mann', 'mit', 'einem', 'orangefarbenen', 'hut', ',', 'der', 'etwas', 'anstarrt', '.']\n",
- "\n",
- "trg = ['a', 'man', 'in', 'an', 'orange', 'hat', 'starring', 'at', 'something', '.']\n",
- "\n",
- "predicted trg = ['a', 'man', 'in', 'an', 'orange', 'hat', '', 'something', '.']\n"
- ]
- }
- ],
- "source": [
- "example_idx = 0\n",
- "\n",
- "src = test_dataset[example_idx][0]\n",
- "trg = test_dataset[example_idx][1]\n",
- "pred_trg = inference(src)\n",
- "\n",
- "print(f'src = {src}')\n",
- "print(f'trg = {trg}')\n",
- "print(f\"predicted trg = {pred_trg}\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "source": [
- "## BLEU得分\n",
- "\n",
- "双语替换评测得分(bilingual evaluation understudy,BLEU)为衡量文本翻译模型生成出来的语句好坏的一种算法,它的核心在于评估机器翻译的译文 $\\text{pred}$ 与人工翻译的参考译文 $\\text{label}$ 的相似度。通过对机器译文的片段与参考译文进行比较,计算出各个片段的的分数,并配以权重进行加和,基本规则为:\n",
- "\n",
- "1. 惩罚过短的预测,即如果机器翻译出来的译文相对于人工翻译的参考译文过于短小,则命中率越高,需要施加更多的惩罚;\n",
- "2. 对长段落匹配更高的权重,即如果出现长段落的完全命中,说明机器翻译的译文更贴近人工翻译的参考译文;\n",
- "\n",
- "BLEU的公式如下:\n",
- "\n",
- "$$exp(min(0, 1-\\frac{len(\\text{label})}{len(\\text{pred})})\\Pi^k_{n=1}p_n^{1/2^n})$$\n",
- "\n",
- "- `len(label)`:人工翻译的译文长度\n",
- "- `len(pred)`:机器翻译的译文长度\n",
- "- `p_n`:n-gram的精度\n",
- "\n",
- "我们可以调用`nltk`中的`corpus_bleu`函数来计算BLEU,在此之前,需要手动下载`nltk`。\n",
- "> pip install nltk"
- ]
- },
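- {
- "cell_type": "markdown",
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "source": [
- "To make the formula concrete, here is a minimal pure-Python sketch that computes this BLEU variant directly on a pair of hypothetical token lists. Note that `corpus_bleu` from `nltk` uses uniform n-gram weights by default rather than the $1/2^n$ weighting shown above, so the two scores are not expected to match exactly:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "slideshow": {
- "slide_type": "fragment"
- }
- },
- "outputs": [],
- "source": [
- "import math\n",
- "from collections import Counter\n",
- "\n",
- "def toy_bleu(pred, label, k=2):\n",
- "    \"\"\"Minimal sketch of the BLEU formula above, on hypothetical token lists.\"\"\"\n",
- "    # brevity penalty: exp(min(0, 1 - len(label)/len(pred)))\n",
- "    score = math.exp(min(0., 1 - len(label) / len(pred)))\n",
- "    for n in range(1, k + 1):\n",
- "        # clipped n-gram matches between prediction and reference\n",
- "        pred_ngrams = Counter(tuple(pred[i:i + n]) for i in range(len(pred) - n + 1))\n",
- "        label_ngrams = Counter(tuple(label[i:i + n]) for i in range(len(label) - n + 1))\n",
- "        matched = sum(min(c, label_ngrams[g]) for g, c in pred_ngrams.items())\n",
- "        p_n = matched / max(1, len(pred) - n + 1)\n",
- "        score *= p_n ** (1 / 2 ** n)\n",
- "    return score\n",
- "\n",
- "print(f\"{toy_bleu(['a', 'man', 'in', 'a', 'hat'], ['a', 'man', 'in', 'an', 'orange', 'hat']):.4f}\")"
- ]
- },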
- {
- "cell_type": "code",
- "execution_count": 49,
- "metadata": {
- "slideshow": {
- "slide_type": "slide"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "BLEU score = 48.22\n"
- ]
- }
- ],
- "source": [
- "from nltk.translate.bleu_score import corpus_bleu\n",
- "\n",
- "def calculate_bleu(dataset, max_len=50):\n",
- " trgs = []\n",
- " pred_trgs = []\n",
- " \n",
- " for data in dataset[:10]:\n",
- " \n",
- " src = data[0]\n",
- " trg = data[1]\n",
- "\n",
- " pred_trg = inference(src, max_len)\n",
- " pred_trgs.append(pred_trg)\n",
- " trgs.append([trg])\n",
- " \n",
- " return corpus_bleu(trgs, pred_trgs)\n",
- "\n",
- "bleu_score = calculate_bleu(test_dataset)\n",
- "\n",
- "print(f'BLEU score = {bleu_score*100:.2f}')"
- ]
- }
- ],
- "metadata": {
- "AIGalleryInfo": {
- "item_id": "b112c35e-18f9-4a22-bb40-347df216632c"
- },
- "celltoolbar": "Slideshow",
- "flavorInfo": {
- "architecture": "X86_64",
- "category": "GPU"
- },
- "imageInfo": {
- "id": "e1a07296-22a8-4f05-8bc8-e936c8e54202",
- "name": "mindspore1.7.0-cuda10.1-py3.7-ubuntu18.04"
- },
- "kernelspec": {
- "display_name": "ms2.2.14",
- "language": "python",
- "name": "ms2.2.14"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.19"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}