add jp translations
parent 0db4fce48e
commit 9fc7314a06

@@ -1,23 +0,0 @@
import type { AppProps } from 'next/app';
import Script from 'next/script';
import { Analytics } from '@vercel/analytics/react';

function MyApp({ Component, pageProps }: AppProps) {
  return (
    <>
      <Script async src="https://www.googletagmanager.com/gtag/js?id=G-ST7R3WQ353"/>
      <Script>{`
        window.dataLayer = window.dataLayer || [];
        function gtag(){dataLayer.push(arguments);}
        gtag('js', new Date());
        gtag('config', 'G-ST7R3WQ353');
      `}
      </Script>

      <Component {...pageProps} />
      <Analytics />
    </>
  );
}

export default MyApp;

@@ -1,23 +0,0 @@
{
  "index": "Prompt Engineering",
  "introduction": "はじめに",
  "techniques": "テクニック",
  "applications": "アプリケーション",
  "models": "モデル",
  "risks": "リスクと誤用",
  "papers": "論文",
  "tools": "ツール",
  "notebooks": "ノートブック",
  "datasets": "データセット",
  "readings": "参考文献",
  "about": {
    "title": "About",
    "type": "page"
  },
  "contact": {
    "title": "Contact ↗",
    "type": "page",
    "href": "https://twitter.com/dair_ai",
    "newWindow": true
  }
}

@@ -1,6 +0,0 @@
{
  "flan": "Flan",
  "chatgpt": "ChatGPT",
  "gpt-4": "GPT-4"
}

@@ -5,7 +5,7 @@ const withNextra = require('nextra')({

module.exports = withNextra({
  i18n: {
-   locales: ['en', 'zh'],
+   locales: ['en', 'zh', 'jp'],
    defaultLocale: 'en'
  }
})
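
For context, a minimal sketch of the resulting i18n setup — presumably `next.config.js`. Only the `i18n` block is taken from this diff; the `theme` and `themeConfig` options are assumptions based on a typical Nextra docs setup:

```js
// Sketch of next.config.js after this change (assumed surrounding options).
const withNextra = require('nextra')({
  theme: 'nextra-theme-docs',       // assumption: standard docs theme
  themeConfig: './theme.config.tsx' // assumption: the theme config edited later in this commit
})

module.exports = withNextra({
  i18n: {
    locales: ['en', 'zh', 'jp'], // 'jp' added by this commit
    defaultLocale: 'en'
  }
})
```

With this in place, Next.js serves the non-default locales under a path prefix (e.g. `/jp/...`), while `en` remains the unprefixed default.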

@@ -0,0 +1,27 @@
{
  "index": "Prompt Engineering",
  "introduction": "Introduction",
  "techniques": "Techniques",
  "applications": "Applications",
  "models": "Models",
  "risks": "Risks & Misuses",
  "papers": "Papers",
  "tools": "Tools",
  "notebooks": "Notebooks",
  "datasets": "Datasets",
  "readings": "Additional Readings",
  "about": {
    "title": "About",
    "type": "page"
  },
  "course": {
    "title": "Prompt Engineering Course",
    "type": "page"
  },
  "contact": {
    "title": "Contact ↗",
    "type": "page",
    "href": "https://twitter.com/dair_ai",
    "newWindow": true
  }
}

@@ -0,0 +1,11 @@
# Prompt Engineering Course

We have partnered with Sphere to deliver a ["Prompt Engineering for LLMs"](https://www.getsphere.com/cohorts/prompt-engineering-for-llms?source=promptingguide) course in May 2023.

This hands-on course is designed to teach all the latest prompt engineering techniques and tools used in the real world for effectively building applications on top of large language models.

If you want to take your prompt engineering skills to the next level, we highly recommend the course.

This course also includes a certificate of completion.

Note that this course will be delivered in English.

@@ -0,0 +1,8 @@
{
  "flan": "Flan",
  "chatgpt": "ChatGPT",
  "llama": "LLaMA",
  "gpt-4": "GPT-4",
  "collection": "Model Collection"
}

@@ -5,4 +5,5 @@
  "gpt-4": "GPT-4",
  "collection": "Model Collection"
}

@@ -0,0 +1,64 @@
# Model Collection

import { Callout, FileTree } from 'nextra-theme-docs'

<Callout emoji="⚠️">
  This section is under heavy development.
</Callout>

This section consists of a collection and summary of notable and foundational LLMs. (Data adapted from [Papers with Code](https://paperswithcode.com/methods/category/language-models) and the recent work by [Zhao et al. (2023)](https://arxiv.org/pdf/2303.18223.pdf).)

## Models

| Model | Release Date | Description |
| --- | --- | --- |
| [BERT](https://arxiv.org/abs/1810.04805) | 2018 | Bidirectional Encoder Representations from Transformers |
| [GPT](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf) | 2018 | Improving Language Understanding by Generative Pre-Training |
| [RoBERTa](https://arxiv.org/abs/1907.11692) | 2019 | A Robustly Optimized BERT Pretraining Approach |
| [GPT-2](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) | 2019 | Language Models are Unsupervised Multitask Learners |
| [T5](https://arxiv.org/abs/1910.10683) | 2019 | Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer |
| [BART](https://arxiv.org/abs/1910.13461) | 2019 | Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension |
| [ALBERT](https://arxiv.org/abs/1909.11942) | 2019 | A Lite BERT for Self-supervised Learning of Language Representations |
| [XLNet](https://arxiv.org/abs/1906.08237) | 2019 | Generalized Autoregressive Pretraining for Language Understanding and Generation |
| [CTRL](https://arxiv.org/abs/1909.05858) | 2019 | CTRL: A Conditional Transformer Language Model for Controllable Generation |
| [ERNIE](https://arxiv.org/abs/1904.09223v1) | 2019 | ERNIE: Enhanced Representation through Knowledge Integration |
| [GShard](https://arxiv.org/abs/2006.16668v1) | 2020 | GShard: Scaling Giant Models with Conditional Computation and Automatic Sharding |
| [GPT-3](https://arxiv.org/abs/2005.14165) | 2020 | Language Models are Few-Shot Learners |
| [LaMDA](https://arxiv.org/abs/2201.08239v3) | 2021 | LaMDA: Language Models for Dialog Applications |
| [PanGu-α](https://arxiv.org/abs/2104.12369v1) | 2021 | PanGu-α: Large-scale Autoregressive Pretrained Chinese Language Models with Auto-parallel Computation |
| [mT5](https://arxiv.org/abs/2010.11934v3) | 2021 | mT5: A massively multilingual pre-trained text-to-text transformer |
| [CPM-2](https://arxiv.org/abs/2106.10715v3) | 2021 | CPM-2: Large-scale Cost-effective Pre-trained Language Models |
| [T0](https://arxiv.org/abs/2110.08207) | 2021 | Multitask Prompted Training Enables Zero-Shot Task Generalization |
| [HyperCLOVA](https://arxiv.org/abs/2109.04650) | 2021 | What Changes Can Large-scale Language Models Bring? Intensive Study on HyperCLOVA: Billions-scale Korean Generative Pretrained Transformers |
| [Codex](https://arxiv.org/abs/2107.03374v2) | 2021 | Evaluating Large Language Models Trained on Code |
| [ERNIE 3.0](https://arxiv.org/abs/2107.02137v1) | 2021 | ERNIE 3.0: Large-scale Knowledge Enhanced Pre-training for Language Understanding and Generation |
| [Jurassic-1](https://uploads-ssl.webflow.com/60fd4503684b466578c0d307/61138924626a6981ee09caf6_jurassic_tech_paper.pdf) | 2021 | Jurassic-1: Technical Details and Evaluation |
| [FLAN](https://arxiv.org/abs/2109.01652v5) | 2021 | Finetuned Language Models Are Zero-Shot Learners |
| [MT-NLG](https://arxiv.org/abs/2201.11990v3) | 2021 | Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, A Large-Scale Generative Language Model |
| [Yuan 1.0](https://arxiv.org/abs/2110.04725v2) | 2021 | Yuan 1.0: Large-Scale Pre-trained Language Model in Zero-Shot and Few-Shot Learning |
| [WebGPT](https://arxiv.org/abs/2112.09332v3) | 2021 | WebGPT: Browser-assisted question-answering with human feedback |
| [Gopher](https://arxiv.org/abs/2112.11446v2) | 2021 | Scaling Language Models: Methods, Analysis & Insights from Training Gopher |
| [ERNIE 3.0 Titan](https://arxiv.org/abs/2112.12731v1) | 2021 | ERNIE 3.0 Titan: Exploring Larger-scale Knowledge Enhanced Pre-training for Language Understanding and Generation |
| [GLaM](https://arxiv.org/abs/2112.06905) | 2021 | GLaM: Efficient Scaling of Language Models with Mixture-of-Experts |
| [InstructGPT](https://arxiv.org/abs/2203.02155v1) | 2022 | Training language models to follow instructions with human feedback |
| [GPT-NeoX-20B](https://arxiv.org/abs/2204.06745v1) | 2022 | GPT-NeoX-20B: An Open-Source Autoregressive Language Model |
| [AlphaCode](https://arxiv.org/abs/2203.07814v1) | 2022 | Competition-Level Code Generation with AlphaCode |
| [CodeGen](https://arxiv.org/abs/2203.13474v5) | 2022 | CodeGen: An Open Large Language Model for Code with Multi-Turn Program Synthesis |
| [Chinchilla](https://arxiv.org/abs/2203.15556) | 2022 | Shows that for a compute budget, the best performances are not achieved by the largest models but by smaller models trained on more data. |
| [Tk-Instruct](https://arxiv.org/abs/2204.07705v3) | 2022 | Super-NaturalInstructions: Generalization via Declarative Instructions on 1600+ NLP Tasks |
| [UL2](https://arxiv.org/abs/2205.05131v3) | 2022 | UL2: Unifying Language Learning Paradigms |
| [PaLM](https://arxiv.org/abs/2204.02311v5) | 2022 | PaLM: Scaling Language Modeling with Pathways |
| [OPT](https://arxiv.org/abs/2205.01068) | 2022 | OPT: Open Pre-trained Transformer Language Models |
| [BLOOM](https://arxiv.org/abs/2211.05100v3) | 2022 | BLOOM: A 176B-Parameter Open-Access Multilingual Language Model |
| [GLM-130B](https://arxiv.org/abs/2210.02414v1) | 2022 | GLM-130B: An Open Bilingual Pre-trained Model |
| [AlexaTM](https://arxiv.org/abs/2208.01448v2) | 2022 | AlexaTM 20B: Few-Shot Learning Using a Large-Scale Multilingual Seq2Seq Model |
| [Flan-T5](https://arxiv.org/abs/2210.11416v5) | 2022 | Scaling Instruction-Finetuned Language Models |
| [Sparrow](https://arxiv.org/abs/2209.14375) | 2022 | Improving alignment of dialogue agents via targeted human judgements |
| [U-PaLM](https://arxiv.org/abs/2210.11399v2) | 2022 | Transcending Scaling Laws with 0.1% Extra Compute |
| [mT0](https://arxiv.org/abs/2211.01786v1) | 2022 | Crosslingual Generalization through Multitask Finetuning |
| [Galactica](https://arxiv.org/abs/2211.09085v1) | 2022 | Galactica: A Large Language Model for Science |
| [OPT-IML](https://arxiv.org/abs/2212.12017v3) | 2022 | OPT-IML: Scaling Language Model Instruction Meta Learning through the Lens of Generalization |
| [LLaMA](https://arxiv.org/abs/2302.13971v1) | 2023 | LLaMA: Open and Efficient Foundation Language Models |
| [GPT-4](https://arxiv.org/abs/2303.08774v3) | 2023 | GPT-4 Technical Report |
| [PanGu-Σ](https://arxiv.org/abs/2303.10845v1) | 2023 | PanGu-Σ: Towards Trillion Parameter Language Model with Sparse Heterogeneous Computing |
| [BloombergGPT](https://arxiv.org/abs/2303.17564v1) | 2023 | BloombergGPT: A Large Language Model for Finance |

@@ -0,0 +1,3 @@
# LLaMA: Open and Efficient Foundation Language Models

Needs translation! Feel free to contribute a translation by clicking the `Edit this page` button on the right side.
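
The new Japanese LLaMA stub above presumably lives at a locale-suffixed path such as `models/llama.jp.mdx`, following Nextra's convention that each page exists once per locale configured in the i18n settings. A purely illustrative snippet of that naming scheme (the helper below is hypothetical, not part of the repo):

```ts
// Hypothetical helper, only to illustrate the locale-suffix naming scheme.
const locales = ['en', 'zh', 'jp'] as const

function localizedFiles(page: string): string[] {
  // For each configured locale, Nextra looks for "<page>.<locale>.mdx"
  // (and "_meta.<locale>.json" for the sidebar metadata shown in this diff).
  return locales.map((locale) => `${page}.${locale}.mdx`)
}

console.log(localizedFiles('pages/models/llama'))
// -> [ 'pages/models/llama.en.mdx', 'pages/models/llama.zh.mdx', 'pages/models/llama.jp.mdx' ]
```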

@@ -4,6 +4,7 @@

## 概要

- [Nature Language Reasoning, A Survey](https://arxiv.org/abs/2303.14725) (March 2023)
- [Augmented Language Models: a Survey](https://arxiv.org/abs/2302.07842) (Feb 2023)
- [A Survey for In-context Learning](https://arxiv.org/abs/2301.00234) (Dec 2022)
- [Towards Reasoning in Large Language Models: A Survey](https://arxiv.org/abs/2212.10403) (Dec 2022)
@@ -14,6 +15,10 @@

## 取り組み

- [Self-Refine: Iterative Refinement with Self-Feedback](https://arxiv.org/abs/2303.17651v1) (Mar 2023)
- [kNN Prompting: Beyond-Context Learning with Calibration-Free Nearest Neighbor Inference](https://arxiv.org/abs/2303.13824) (Mar 2023)
- [Visual-Language Prompt Tuning with Knowledge-guided Context Optimization](https://arxiv.org/abs/2303.13283) (Mar 2023)
- [Fairness-guided Few-shot Prompting for Large Language Models](https://arxiv.org/abs/2303.13217) (Mar 2023)
- [Context-faithful Prompting for Large Language Models](https://arxiv.org/abs/2303.11315) (Mar 2023)
- [Is Prompt All You Need? No. A Comprehensive and Broader View of Instruction Learning](https://arxiv.org/abs/2303.10475) (Mar 2023)
- [UPRISE: Universal Prompt Retrieval for Improving Zero-Shot Evaluation](https://arxiv.org/abs/2303.08518) (Mar 2023)
@@ -110,9 +115,22 @@
- [AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts](https://arxiv.org/abs/2010.15980) (Oct 2020)
- [Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) (May 2020)
- [How Can We Know What Language Models Know?](https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00324/96460/How-Can-We-Know-What-Language-Models-Know) (July 2020)
- [Scaling Laws for Neural Language Models](https://arxiv.org/abs/2001.08361) (Jan 2020)

## Applications

- [BloombergGPT: A Large Language Model for Finance](https://arxiv.org/abs/2303.17564) (March 2023)
- [Medical Intervention Duration Estimation Using Language-enhanced Transformer Encoder with Medical Prompts](https://arxiv.org/abs/2303.17408) (March 2023)
- [Soft-prompt tuning to predict lung cancer using primary care free-text Dutch medical notes](https://arxiv.org/abs/2303.15846) (March 2023)
- [TaskMatrix.AI: Completing Tasks by Connecting Foundation Models with Millions of APIs](https://arxiv.org/abs/2303.16434) (March 2023)
- [Larger Probes Tell a Different Story: Extending Psycholinguistic Datasets Via In-Context Learning](https://arxiv.org/abs/2303.16445) (March 2023)
- [Linguistically Informed ChatGPT Prompts to Enhance Japanese-Chinese Machine Translation: A Case Study on Attributive Clauses](https://arxiv.org/abs/2303.15587) (March 2023)
- [Knowledge-augmented Frame Semantic Parsing with Hybrid Prompt-tuning](https://arxiv.org/abs/2303.14375) (March 2023)
- [Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation](https://arxiv.org/abs/2303.15413) (March 2023)
- [Zero-shot Model Diagnosis](https://arxiv.org/abs/2303.15441#) (March 2023)
- [Prompting Large Language Models to Generate Code-Mixed Texts: The Case of South East Asian Languages](https://arxiv.org/abs/2303.13592) (March 2023)
- [SPeC: A Soft Prompt-Based Calibration on Mitigating Performance Variability in Clinical Notes Summarization](https://arxiv.org/abs/2303.13035) (March 2023)
- [Large Language Models and Simple, Stupid Bugs](https://arxiv.org/abs/2303.11455) (March 2023)
- [Can Generative Pre-trained Transformers (GPT) Pass Assessments in Higher Education Programming Courses?](https://arxiv.org/abs/2303.09325) (Mar 2023)
- [SelfCheckGPT: Zero-Resource Black-Box Hallucination Detection for Generative Large Language Models](https://arxiv.org/abs/2303.08896) (Mar 2023)
- [ICL-D3IE: In-Context Learning with Diverse Demonstrations Updating for Document Information Extraction](https://arxiv.org/abs/2303.05063) (March 2023)
@@ -146,6 +164,7 @@
- [Conversing with Copilot: Exploring Prompt Engineering for Solving CS1 Problems Using Natural Language](https://arxiv.org/abs/2210.15157) (Oct 2022)
- [Piloting Copilot and Codex: Hot Temperature, Cold Prompts, or Black Magic?](https://arxiv.org/abs/2210.14699) (Oct 2022)
- [Plot Writing From Scratch Pre-Trained Language Models](https://aclanthology.org/2022.inlg-main.5) (July 2022)
- [Survey of Hallucination in Natural Language Generation](https://arxiv.org/abs/2202.03629) (Feb 2022)

## Collections

@@ -4,6 +4,7 @@

## 综述

- [Nature Language Reasoning, A Survey](https://arxiv.org/abs/2303.14725) (March 2023)
- [Augmented Language Models: a Survey](https://arxiv.org/abs/2302.07842) (Feb 2023)
- [A Survey for In-context Learning](https://arxiv.org/abs/2301.00234) (Dec 2022)
- [Towards Reasoning in Large Language Models: A Survey](https://arxiv.org/abs/2212.10403) (Dec 2022)
@@ -14,8 +15,10 @@

## 方法

- [Visual-Language Prompt Tuning with Knowledge-guided Context Optimization](https://arxiv.org/abs/2303.13283) (March 2023)
- [Fairness-guided Few-shot Prompting for Large Language Models](https://arxiv.org/abs/2303.13217) (March 2023)
- [Self-Refine: Iterative Refinement with Self-Feedback](https://arxiv.org/abs/2303.17651v1) (Mar 2023)
- [kNN Prompting: Beyond-Context Learning with Calibration-Free Nearest Neighbor Inference](https://arxiv.org/abs/2303.13824) (Mar 2023)
- [Visual-Language Prompt Tuning with Knowledge-guided Context Optimization](https://arxiv.org/abs/2303.13283) (Mar 2023)
- [Fairness-guided Few-shot Prompting for Large Language Models](https://arxiv.org/abs/2303.13217) (Mar 2023)
- [Context-faithful Prompting for Large Language Models](https://arxiv.org/abs/2303.11315) (Mar 2023)
- [Is Prompt All You Need? No. A Comprehensive and Broader View of Instruction Learning](https://arxiv.org/abs/2303.10475) (Mar 2023)
- [UPRISE: Universal Prompt Retrieval for Improving Zero-Shot Evaluation](https://arxiv.org/abs/2303.08518) (Mar 2023)
@@ -112,9 +115,20 @@
- [AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts](https://arxiv.org/abs/2010.15980) (Oct 2020)
- [Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) (May 2020)
- [How Can We Know What Language Models Know?](https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00324/96460/How-Can-We-Know-What-Language-Models-Know) (July 2020)
- [Scaling Laws for Neural Language Models](https://arxiv.org/abs/2001.08361) (Jan 2020)

## 应用

- [BloombergGPT: A Large Language Model for Finance](https://arxiv.org/abs/2303.17564) (March 2023)
- [Medical Intervention Duration Estimation Using Language-enhanced Transformer Encoder with Medical Prompts](https://arxiv.org/abs/2303.17408) (March 2023)
- [Soft-prompt tuning to predict lung cancer using primary care free-text Dutch medical notes](https://arxiv.org/abs/2303.15846) (March 2023)
- [TaskMatrix.AI: Completing Tasks by Connecting Foundation Models with Millions of APIs](https://arxiv.org/abs/2303.16434) (March 2023)
- [Larger Probes Tell a Different Story: Extending Psycholinguistic Datasets Via In-Context Learning](https://arxiv.org/abs/2303.16445) (March 2023)
- [Linguistically Informed ChatGPT Prompts to Enhance Japanese-Chinese Machine Translation: A Case Study on Attributive Clauses](https://arxiv.org/abs/2303.15587) (March 2023)
- [Knowledge-augmented Frame Semantic Parsing with Hybrid Prompt-tuning](https://arxiv.org/abs/2303.14375) (March 2023)
- [Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation](https://arxiv.org/abs/2303.15413) (March 2023)
- [Zero-shot Model Diagnosis](https://arxiv.org/abs/2303.15441#) (March 2023)
- [Prompting Large Language Models to Generate Code-Mixed Texts: The Case of South East Asian Languages](https://arxiv.org/abs/2303.13592) (March 2023)
- [SPeC: A Soft Prompt-Based Calibration on Mitigating Performance Variability in Clinical Notes Summarization](https://arxiv.org/abs/2303.13035) (March 2023)
- [Large Language Models and Simple, Stupid Bugs](https://arxiv.org/abs/2303.11455) (March 2023)
- [Can Generative Pre-trained Transformers (GPT) Pass Assessments in Higher Education Programming Courses?](https://arxiv.org/abs/2303.09325) (Mar 2023)
@@ -150,7 +164,7 @@
- [Conversing with Copilot: Exploring Prompt Engineering for Solving CS1 Problems Using Natural Language](https://arxiv.org/abs/2210.15157) (Oct 2022)
- [Piloting Copilot and Codex: Hot Temperature, Cold Prompts, or Black Magic?](https://arxiv.org/abs/2210.14699) (Oct 2022)
- [Plot Writing From Scratch Pre-Trained Language Models](https://aclanthology.org/2022.inlg-main.5) (July 2022)

- [Survey of Hallucination in Natural Language Generation](https://arxiv.org/abs/2202.03629) (Feb 2022)
## 收集

- [Chain-of-Thought Papers](https://github.com/Timothyxxx/Chain-of-ThoughtsPapers)

@@ -17,7 +17,8 @@ const config: DocsThemeConfig = {
  ),
  i18n: [
    { locale: 'en', text: 'English' },
-   { locale: 'zh', text: '中文' }
+   { locale: 'zh', text: '中文' },
+   { locale: 'jp', text: '日本語'}
  ],
  head: function UseHead() {
    const { title } = useConfig()
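
Putting the pieces together, a minimal sketch of the theme configuration (presumably `theme.config.tsx`) after this change — only the `i18n` array is taken from the diff; a real config also sets the logo, head, footer, and other `DocsThemeConfig` fields, which are omitted here:

```ts
import type { DocsThemeConfig } from 'nextra-theme-docs'

// Entries shown in the nextra-theme-docs language switcher; 'jp' is new in this commit.
const config: DocsThemeConfig = {
  i18n: [
    { locale: 'en', text: 'English' },
    { locale: 'zh', text: '中文' },
    { locale: 'jp', text: '日本語' }
  ]
}

export default config
```

Each entry pairs a locale code from `next.config.js` with the label displayed in the switcher, so the Japanese pages become reachable from the site header once the locale-suffixed content files exist.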