From eccaf4877c60984dbc37844b05df45f6d85587bc Mon Sep 17 00:00:00 2001 From: Pedro Mosquera Date: Sat, 29 Apr 2023 19:54:13 +0200 Subject: [PATCH] Translating root documents --- ca_pages/_app.tsx | 24 ++++ ca_pages/about.ca.mdx | 11 ++ ca_pages/applications.ca.mdx | 9 ++ ca_pages/applications/_meta.ca.json | 6 + ca_pages/course.ca.mdx | 9 ++ ca_pages/datasets.ca.mdx | 13 ++ ca_pages/index.ca.mdx | 9 ++ ca_pages/introduction.ca.mdx | 8 ++ ca_pages/introduction/_meta.ca.json | 7 + ca_pages/models.ca.mdx | 9 ++ ca_pages/models/_meta.ca.json | 7 + ca_pages/notebooks.ca.mdx | 11 ++ ca_pages/papers.ca.mdx | 199 ++++++++++++++++++++++++++++ ca_pages/readings.ca.mdx | 118 +++++++++++++++++ ca_pages/risks.ca.mdx | 11 ++ ca_pages/risks/_meta.ca.json | 5 + ca_pages/style.css | 1 + ca_pages/techniques.ca.mdx | 5 + ca_pages/techniques/_meta.ca.json | 13 ++ ca_pages/tools.ca.mdx | 43 ++++++ 20 files changed, 518 insertions(+) create mode 100644 ca_pages/_app.tsx create mode 100644 ca_pages/about.ca.mdx create mode 100644 ca_pages/applications.ca.mdx create mode 100644 ca_pages/applications/_meta.ca.json create mode 100644 ca_pages/course.ca.mdx create mode 100644 ca_pages/datasets.ca.mdx create mode 100644 ca_pages/index.ca.mdx create mode 100644 ca_pages/introduction.ca.mdx create mode 100644 ca_pages/introduction/_meta.ca.json create mode 100644 ca_pages/models.ca.mdx create mode 100644 ca_pages/models/_meta.ca.json create mode 100644 ca_pages/notebooks.ca.mdx create mode 100644 ca_pages/papers.ca.mdx create mode 100644 ca_pages/readings.ca.mdx create mode 100644 ca_pages/risks.ca.mdx create mode 100644 ca_pages/risks/_meta.ca.json create mode 100644 ca_pages/style.css create mode 100644 ca_pages/techniques.ca.mdx create mode 100644 ca_pages/techniques/_meta.ca.json create mode 100644 ca_pages/tools.ca.mdx diff --git a/ca_pages/_app.tsx b/ca_pages/_app.tsx new file mode 100644 index 0000000..af66d7f --- /dev/null +++ b/ca_pages/_app.tsx @@ -0,0 +1,24 @@ +import type { AppProps 
} from 'next/app'; +import Script from 'next/script'; +import { Analytics } from '@vercel/analytics/react'; +import './style.css'; + +function MyApp({ Component, pageProps }: AppProps) { + return ( + <> + + + + + + ); +} + +export default MyApp; \ No newline at end of file diff --git a/ca_pages/about.ca.mdx b/ca_pages/about.ca.mdx new file mode 100644 index 0000000..e533511 --- /dev/null +++ b/ca_pages/about.ca.mdx @@ -0,0 +1,11 @@ +# Quant a + +La Guia d'Enginyeria de Prompts és un projecte de [DAIR.AI](https://github.com/dair-ai). L'objectiu és educar investigadors i professionals sobre l'enginyeria de prompts. + +DAIR.AI té com a objectiu democratitzar la investigació, l'educació i les tecnologies d'intel·ligència artificial. La nostra missió és habilitar la propera generació d'innovadors i creadors d'IA. + +Donem la benvinguda a les contribucions de la comunitat. Estigueu atents als botons d'Edició. + +Informació sobre la llicència [aquí](https://github.com/dair-ai/Prompt-Engineering-Guide#license). + +Agafem inspiració de molts recursos oberts com [OpenAI CookBook](https://github.com/openai/openai-cookbook), [Pretrain, Prompt, Predict](http://pretrain.nlpedia.ai/), [Learn Prompting](https://learnprompting.org/) i molts altres. \ No newline at end of file diff --git a/ca_pages/applications.ca.mdx b/ca_pages/applications.ca.mdx new file mode 100644 index 0000000..5cacc47 --- /dev/null +++ b/ca_pages/applications.ca.mdx @@ -0,0 +1,9 @@ +# Aplicacions de Prompts + +import { Callout } from 'nextra-theme-docs' + +En aquesta secció, tractarem algunes maneres avançades i interessants d'utilitzar l'enginyeria de prompts per realitzar tasques útils i més avançades. + + + Aquesta secció està en plena fase de desenvolupament. 
+ \ No newline at end of file diff --git a/ca_pages/applications/_meta.ca.json b/ca_pages/applications/_meta.ca.json new file mode 100644 index 0000000..e8f4804 --- /dev/null +++ b/ca_pages/applications/_meta.ca.json @@ -0,0 +1,6 @@ +{ + "pal": "Models de Llenguatge Assistits per Programa", + "generating": "Generació de Dades", + "coding": "Generació de Codi", + "workplace_casestudy": "Estudi de Cas de Classificació de Llocs de Treball per a Titulats" +} \ No newline at end of file diff --git a/ca_pages/course.ca.mdx b/ca_pages/course.ca.mdx new file mode 100644 index 0000000..541d5fc --- /dev/null +++ b/ca_pages/course.ca.mdx @@ -0,0 +1,9 @@ +# Curs d'Enginyeria de Prompts + +Hem establert una col·laboració amb Sphere per oferir un curs de ["Enginyeria de Prompts per a LLMs"](https://www.getsphere.com/cohorts/prompt-engineering-for-llms?source=promptingguide) al maig de 2023. + +Aquest curs pràctic està dissenyat per ensenyar les tècniques i eines d'enginyeria de prompts més recents utilitzades en el món real per construir aplicacions efectives sobre models de llenguatge de grans dimensions. + +Si voleu elevar les vostres habilitats en enginyeria de prompts al següent nivell, us recomanem molt aquest curs. + +Aquest curs també inclou un certificat de finalització. 
\ No newline at end of file diff --git a/ca_pages/datasets.ca.mdx b/ca_pages/datasets.ca.mdx new file mode 100644 index 0000000..2cd6c22 --- /dev/null +++ b/ca_pages/datasets.ca.mdx @@ -0,0 +1,13 @@ +# Datasets + +#### (Ordenats per Nom) + +- [Anthropic's Red Team dataset](https://github.com/anthropics/hh-rlhf/tree/master/red-team-attempts), [(paper)](https://arxiv.org/abs/2209.07858) +- [Awesome ChatGPT Prompts](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts) +- [DiffusionDB](https://github.com/poloclub/diffusiondb) +- [Midjourney Prompts](https://huggingface.co/datasets/succinctly/midjourney-prompts) +- [P3 - Public Pool of Prompts](https://huggingface.co/datasets/bigscience/P3) +- [PartiPrompts](https://parti.research.google) +- [Real Toxicity Prompts](https://allenai.org/data/real-toxicity-prompts) +- [Stable Diffusion Dataset](https://huggingface.co/datasets/Gustavosta/Stable-Diffusion-Prompts) +- [WritingPrompts](https://www.reddit.com/r/WritingPrompts) diff --git a/ca_pages/index.ca.mdx b/ca_pages/index.ca.mdx new file mode 100644 index 0000000..e64b49f --- /dev/null +++ b/ca_pages/index.ca.mdx @@ -0,0 +1,9 @@ +# Guia d'Enginyeria de Prompts + +L'enginyeria de prompts és una disciplina relativament nova per al desenvolupament i optimització de prompts per utilitzar eficientment els models de llenguatge (LM) en una àmplia varietat d'aplicacions i temes de recerca. Les habilitats en enginyeria de prompts ajuden a entendre millor les capacitats i limitacions dels models de llenguatge de grans dimensions (LLM). + +Els investigadors utilitzen l'enginyeria de prompts per millorar la capacitat dels LLM en una àmplia gamma de tasques comunes i complexes, com ara la resposta a preguntes i el raonament aritmètic. Els desenvolupadors utilitzen l'enginyeria de prompts per dissenyar tècniques de sol·licitud robustes i efectives que interactuen amb LLM i altres eines. + +L'enginyeria de prompts no es tracta només de dissenyar i desenvolupar prompts. 
Abasta un ampli ventall d'habilitats i tècniques que són útils per interactuar i desenvolupar-se amb LLM. És una habilitat important per interactuar, construir i entendre les capacitats dels LLM. Podeu utilitzar l'enginyeria de prompts per millorar la seguretat dels LLM i construir noves capacitats, com ara augmentar els LLM amb coneixements de domini i eines externes. + +Motivats per l'alt interès en desenvolupar-se amb LLM, hem creat aquesta nova guia d'enginyeria de prompts que conté tots els últims articles, guies d'aprenentatge, models, conferències, referències, noves capacitats de LLM i eines relacionades amb l'enginyeria de prompts. \ No newline at end of file diff --git a/ca_pages/introduction.ca.mdx b/ca_pages/introduction.ca.mdx new file mode 100644 index 0000000..bd47aab --- /dev/null +++ b/ca_pages/introduction.ca.mdx @@ -0,0 +1,8 @@ +# Introducció + +L'enginyeria de prompts és una disciplina relativament nova per al desenvolupament i optimització de prompts per utilitzar eficientment els models de llenguatge (LM) en una àmplia varietat d'aplicacions i temes de recerca. Les habilitats en enginyeria de prompts ajuden a entendre millor les capacitats i limitacions dels models de llenguatge de grans dimensions (LLM). Els investigadors utilitzen l'enginyeria de prompts per millorar la capacitat dels LLM en una àmplia gamma de tasques comunes i complexes, com ara la resposta a preguntes i el raonament aritmètic. Els desenvolupadors utilitzen l'enginyeria de prompts per dissenyar tècniques de sol·licitud robustes i efectives que interactuen amb LLM i altres eines. + +Aquesta guia cobreix els conceptes bàsics dels prompts per donar una idea general de com utilitzar els prompts per interactuar amb els LLM i donar-los instruccions. + +Tots els exemples s'han provat amb `text-davinci-003` utilitzant el [playground d'OpenAI](https://platform.openai.com/playground), tret que s'especifiqui el contrari. El model utilitza les configuracions per defecte, és a dir, `temperature=0.7` i `top-p=1`. 
+ diff --git a/ca_pages/introduction/_meta.ca.json b/ca_pages/introduction/_meta.ca.json new file mode 100644 index 0000000..6afb8d2 --- /dev/null +++ b/ca_pages/introduction/_meta.ca.json @@ -0,0 +1,7 @@ +{ + "settings": "Configuració de LLM", + "basics": "Conceptes Bàsics de la Creació de Prompts", + "elements": "Elements del Prompt", + "tips": "Consells Generals per Dissenyar Prompts", + "examples": "Exemples de Prompts" + } \ No newline at end of file diff --git a/ca_pages/models.ca.mdx b/ca_pages/models.ca.mdx new file mode 100644 index 0000000..f1fc3d1 --- /dev/null +++ b/ca_pages/models.ca.mdx @@ -0,0 +1,9 @@ +# Models + +import { Callout } from 'nextra-theme-docs' + +En aquesta secció, tractarem alguns dels models de llenguatge més recents i com apliquen amb èxit les tècniques d'enginyeria de prompts més avançades i actuals. A més, cobrim les capacitats d'aquests models en una sèrie de tasques i configuracions de prompts, com ara sol·licituds amb poques mostres (few-shot prompting), sol·licituds sense mostres (zero-shot prompting) i sol·licituds en cadena de pensament (chain-of-thought prompting). Entendre aquestes capacitats és important per comprendre les limitacions d'aquests models i com utilitzar-los de manera efectiva. + + + Aquesta secció està en plena fase de desenvolupament. 
+ \ No newline at end of file diff --git a/ca_pages/models/_meta.ca.json b/ca_pages/models/_meta.ca.json new file mode 100644 index 0000000..8e82e06 --- /dev/null +++ b/ca_pages/models/_meta.ca.json @@ -0,0 +1,7 @@ +{ + "flan": "Flan", + "chatgpt": "ChatGPT", + "llama": "LLaMA", + "gpt-4": "GPT-4", + "collection": "Col·lecció de Models" +} \ No newline at end of file diff --git a/ca_pages/notebooks.ca.mdx b/ca_pages/notebooks.ca.mdx new file mode 100644 index 0000000..81f5ef3 --- /dev/null +++ b/ca_pages/notebooks.ca.mdx @@ -0,0 +1,11 @@ +# Notebooks d'Enginyeria de Prompts + +Conté una col·lecció de quaderns que hem dissenyat per ajudar-vos a començar amb l'enginyeria de prompts. Aviat n'afegirem més! + +| Descripció | Notebook | +| :------------ | :---------: | +|Apreneu com realitzar molts tipus diferents de tasques comunes utilitzant les biblioteques `openai` i `LangChain`|[Getting Started with Prompt Engineering](https://github.com/dair-ai/Prompt-Engineering-Guide/blob/main/notebooks/pe-lecture.ipynb)| +|Apreneu com utilitzar el codi com a raonament per resoldre tasques comunes utilitzant l'intèrpret de Python en combinació amb el model de llenguatge.|[Program-Aided Language Model](https://github.com/dair-ai/Prompt-Engineering-Guide/blob/main/notebooks/pe-pal.ipynb)| +|Apreneu més sobre com fer trucades a les API de ChatGPT utilitzant la biblioteca `openai`.|[ChatGPT API Intro](https://github.com/dair-ai/Prompt-Engineering-Guide/blob/main/notebooks/pe-chatgpt-intro.ipynb)| +|Apreneu a utilitzar les funcions de ChatGPT utilitzant la biblioteca `LangChain`. 
|[ChatGPT API with LangChain](https://github.com/dair-ai/Prompt-Engineering-Guide/blob/main/notebooks/pe-chatgpt-langchain.ipynb)| +|Apreneu sobre els prompts adversaris, incloses les mesures defensives.|[Adversarial Prompt Engineering](https://github.com/dair-ai/Prompt-Engineering-Guide/blob/main/notebooks/pe-chatgpt-adversarial.ipynb)| \ No newline at end of file diff --git a/ca_pages/papers.ca.mdx b/ca_pages/papers.ca.mdx new file mode 100644 index 0000000..934eba6 --- /dev/null +++ b/ca_pages/papers.ca.mdx @@ -0,0 +1,199 @@ +# Papers + +A continuació es mostren els últims articles (ordenats per data de llançament) sobre enginyeria de prompts. L'actualitzem diàriament a mesura que apareixen nous articles. Incorporem resums d'aquests articles a les guies anteriors cada setmana. + +## Descripcions generals + + - [Tool Learning with Foundation Models](https://arxiv.org/abs/2304.08354) (Abril 2023) + - [One Small Step for Generative AI, One Giant Leap for AGI: A Complete Survey on ChatGPT in AIGC Era](https://arxiv.org/abs/2304.06488) (Abril 2023) + - [A Bibliometric Review of Large Language Models Research from 2017 to 2023](https://arxiv.org/abs/2304.02020) (Abril 2023) + - [A Survey of Large Language Models](https://arxiv.org/abs/2303.18223) (Abril 2023) + - [Nature Language Reasoning, A Survey](https://arxiv.org/abs/2303.14725) (Març 2023) + - [Augmented Language Models: a Survey](https://arxiv.org/abs/2302.07842) (Feb 2023) + - [A Survey for In-context Learning](https://arxiv.org/abs/2301.00234) (Desembre 2022) + - [Towards Reasoning in Large Language Models: A Survey](https://arxiv.org/abs/2212.10403) (Desembre 2022) + - [Reasoning with Language Model Prompting: A Survey](https://arxiv.org/abs/2212.09597) (Desembre 2022) + - [Emergent Abilities of Large Language Models](https://arxiv.org/abs/2206.07682) (Jun 2022) + - [A Taxonomy of Prompt Modifiers for Text-To-Image Generation](https://arxiv.org/abs/2204.13988) (Apr 2022) + - [Pre-train, Prompt, and Predict: A Systematic 
Survey of Prompting Methods in Natural Language Processing](https://arxiv.org/abs/2107.13586) (Jul 2021) + +## Enfocaments + + - [Boosted Prompt Ensembles for Large Language Models](https://arxiv.org/abs/2304.05970) (Abril 2023) + - [Global Prompt Cell: A Portable Control Module for Effective Prompt](https://arxiv.org/abs/2304.05642) (Abril 2023) + - [Why think step-by-step? Reasoning emerges from the locality of experience](https://arxiv.org/abs/2304.03843) (Abril 2023) + - [Revisiting Automated Prompting: Are We Actually Doing Better?](https://arxiv.org/abs/2304.03609) (Abril 2023) + - [REFINER: Reasoning Feedback on Intermediate Representations](https://arxiv.org/abs/2304.01904) (Abril 2023) + - [Reflexion: an autonomous agent with dynamic memory and self-reflection](https://arxiv.org/abs/2303.11366) (Març 2023) + - [CAMEL: Communicative Agents for "Mind" Exploration of Large Scale Language Model Society](https://arxiv.org/abs/2303.17760) (Març 2023) + - [Self-Refine: Iterative Refinement with Self-Feedback](https://arxiv.org/abs/2303.17651v1) (Març 2023) + - [kNN Prompting: Beyond-Context Learning with Calibration-Free Nearest Neighbor Inference](https://arxiv.org/abs/2303.13824) (Març 2023) + - [Visual-Language Prompt Tuning with Knowledge-guided Context Optimization](https://arxiv.org/abs/2303.13283) (Març 2023) + - [Fairness-guided Few-shot Prompting for Large Language Models](https://arxiv.org/abs/2303.13217) (Març 2023) + - [Context-faithful Prompting for Large Language Models](https://arxiv.org/abs/2303.11315) (Març 2023) + - [Is Prompt All You Need? No. 
A Comprehensive and Broader View of Instruction Learning](https://arxiv.org/abs/2303.10475) (Març 2023) + - [UPRISE: Universal Prompt Retrieval for Improving Zero-Shot Evaluation](https://arxiv.org/abs/2303.08518) (Març 2023) + - [Model-tuning Via Prompts Makes NLP Models Adversarially Robust](https://arxiv.org/abs/2303.07320) (Març 2023) + - [Structure Pretraining and Prompt Tuning for Knowledge Graph Transfer](https://arxiv.org/abs/2303.03922) (Març 2023) + - [CoTEVer: Chain of Thought Prompting Annotation Toolkit for Explanation Verification](https://arxiv.org/abs/2303.03628) (Març 2023) + - [Larger language models do in-context learning differently](https://arxiv.org/abs/2303.03846) (Març 2023) + - [OpenICL: An Open-Source Framework for In-context Learning](https://arxiv.org/abs/2303.02913) (Març 2023) + - [Dynamic Prompting: A Unified Framework for Prompt Tuning](https://arxiv.org/abs/2303.02909) (Març 2023) + - [Multitask Prompt Tuning Enables Parameter-Efficient Transfer Learning](https://arxiv.org/abs/2303.02861) (Març 2023) + - [Effectiveness of Data Augmentation for Prefix Tuning with Limited Data](https://arxiv.org/abs/2303.02577) (Març 2023) + - [Mixture of Soft Prompts for Controllable Data Generation](https://arxiv.org/abs/2303.01580) (Març 2023) + - [Prompt, Generate, then Cache: Cascade of Foundation Models makes Strong Few-shot Learners](https://arxiv.org/abs/2303.02151) (Març 2023) + - [How Robust is GPT-3.5 to Predecessors? A Comprehensive Study on Language Understanding Tasks](https://arxiv.org/abs/2303.00293) (Març 2023) + - [Can ChatGPT Understand Too? 
A Comparative Study on ChatGPT and Fine-tuned BERT](https://arxiv.org/pdf/2302.10198.pdf) (Feb 2023) + - [EvoPrompting: Language Models for Code-Level Neural Architecture Search](https://arxiv.org/abs/2302.14838) (Feb 2023) + - [In-Context Instruction Learning](https://arxiv.org/abs/2302.14691) (Feb 2023) + - [Chain of Hindsight Aligns Language Models with Feedback](https://arxiv.org/abs/2302.02676) (Feb 2023) + - [Language Is Not All You Need: Aligning Perception with Language Models](https://arxiv.org/abs/2302.14045) (Feb 2023) + - [Automatic Prompt Augmentation and Selection with Chain-of-Thought from Labeled Data](https://arxiv.org/abs/2302.12822) (Feb 2023) + - [Active Prompting with Chain-of-Thought for Large Language Models](https://arxiv.org/abs/2302.12246) (Feb 2023) + - [More than you've asked for: A Comprehensive Analysis of Novel Prompt Injection Threats to Application-Integrated Large Language Models](https://arxiv.org/abs/2302.12173) (Feb 2023) + - [A Prompt Pattern Catalog to Enhance Prompt Engineering with ChatGPT](https://arxiv.org/abs/2302.11382) (Feb 2023) + - [Guiding Large Language Models via Directional Stimulus Prompting](https://arxiv.org/abs/2302.11520) (Feb 2023) + - [How Does In-Context Learning Help Prompt Tuning?](https://arxiv.org/abs/2302.11521) (Feb 2023) + - [Scalable Prompt Generation for Semi-supervised Learning with Language Models](https://arxiv.org/abs/2302.09236) (Feb 2023) + - [Bounding the Capabilities of Large Language Models in Open Text Generation with Prompt Constraints](https://arxiv.org/abs/2302.09185) (Feb 2023) + - [À-la-carte Prompt Tuning (APT): Combining Distinct Data Via Composable Prompting](https://arxiv.org/abs/2302.07994) (Feb 2023) + - [GraphPrompt: Unifying Pre-Training and Downstream Tasks for Graph Neural Networks](https://arxiv.org/abs/2302.08043) (Feb 2023) + - [The Capacity for Moral Self-Correction in Large Language Models](https://arxiv.org/abs/2302.07459) (Feb 2023) + - [SwitchPrompt: Learning 
Domain-Specific Gated Soft Prompts for Classification in Low-Resource Domains](https://arxiv.org/abs/2302.06868) (Feb 2023) + - [Evaluating the Robustness of Discrete Prompts](https://arxiv.org/abs/2302.05619) (Feb 2023) + - [Compositional Exemplars for In-context Learning](https://arxiv.org/abs/2302.05698) (Feb 2023) + - [Hard Prompts Made Easy: Gradient-Based Discrete Optimization for Prompt Tuning and Discovery](https://arxiv.org/abs/2302.03668) (Feb 2023) + - [Multimodal Chain-of-Thought Reasoning in Language Models](https://arxiv.org/abs/2302.00923) (Feb 2023) + - [Large Language Models Can Be Easily Distracted by Irrelevant Context](https://arxiv.org/abs/2302.00093) (Feb 2023) + - [Synthetic Prompting: Generating Chain-of-Thought Demonstrations for Large Language Models](https://arxiv.org/abs/2302.00618) (Feb 2023) + - [Progressive Prompts: Continual Learning for Language Models](https://arxiv.org/abs/2301.12314) (Gener 2023) + - [Batch Prompting: Efficient Inference with LLM APIs](https://arxiv.org/abs/2301.08721) (Gener 2023) + - [Demonstrate-Search-Predict: Composing retrieval and language models for knowledge-intensive NLP](https://arxiv.org/abs/2212.14024) (Desembre 2022) + - [On Second Thought, Let's Not Think Step by Step! 
Bias and Toxicity in Zero-Shot Reasoning](https://arxiv.org/abs/2212.08061) (Desembre 2022) + - [Constitutional AI: Harmlessness from AI Feedback](https://arxiv.org/abs/2212.08073) (Desembre 2022) + - [Successive Prompting for Decomposing Complex Questions](https://arxiv.org/abs/2212.04092) (Desembre 2022) + - [Large Language Models are reasoners with Self-Verification](https://arxiv.org/abs/2212.09561v1) (Desembre 2022) + - [Discovering Language Model Behaviors with Model-Written Evaluations](https://arxiv.org/abs/2212.09251) (Desembre 2022) + - [Structured Prompting: Scaling In-Context Learning to 1,000 Examples](https://arxiv.org/abs/2212.06713) (Desembre 2022) + - [PAL: Program-aided Language Models](https://arxiv.org/abs/2211.10435) (Nov 2022) + - [Large Language Models Are Human-Level Prompt Engineers](https://arxiv.org/abs/2211.01910) (Nov 2022) + - [Ignore Previous Prompt: Attack Techniques For Language Models](https://arxiv.org/abs/2211.09527) (Nov 2022) + - [Machine Generated Text: A Comprehensive Survey of Threat Models and Detection Methods](https://arxiv.org/abs/2210.07321) (Nov 2022) + - [Teaching Algorithmic Reasoning via In-context Learning](https://arxiv.org/abs/2211.09066) (Nov 2022) + - [Enhancing Self-Consistency and Performance of Pre-Trained Language Models through Natural Language Inference](https://arxiv.org/abs/2211.11875) (Nov 2022) + - [Ask Me Anything: A simple strategy for prompting language models](https://paperswithcode.com/paper/ask-me-anything-a-simple-strategy-for) (Oct 2022) + - [Recitation-Augmented Language Models](https://arxiv.org/abs/2210.01296) (Oct 2022) + - [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/abs/2210.03629) (Oct 2022) + - [Prompting GPT-3 To Be Reliable](https://arxiv.org/abs/2210.09150) (Oct 2022) + - [Decomposed Prompting: A Modular Approach for Solving Complex Tasks](https://arxiv.org/abs/2210.02406) (Oct 2022) + - [Language Models Are Greedy Reasoners: A Systematic Formal 
Analysis of Chain-of-Thought](https://arxiv.org/abs/2210.01240v3) (Oct 2022) + - [Evaluating the Susceptibility of Pre-Trained Language Models via Handcrafted Adversarial Examples](https://arxiv.org/abs/2209.02128) (Setembre 2022) + - [Dynamic Prompt Learning via Policy Gradient for Semi-structured Mathematical Reasoning](https://arxiv.org/abs/2209.14610) (Setembre 2022) + - [Promptagator: Few-shot Dense Retrieval From 8 Examples](https://arxiv.org/abs/2209.11755) (Setembre 2022) + - [Atlas: Few-shot Learning with Retrieval Augmented Language Models](https://arxiv.org/abs/2208.03299) (Nov 2022) + - [DocPrompting: Generating Code by Retrieving the Docs](https://arxiv.org/abs/2207.05987) (Juliol 2022) + - [On the Advance of Making Language Models Better Reasoners](https://arxiv.org/abs/2206.02336) (June 2022) + - [Large Language Models are Zero-Shot Reasoners](https://arxiv.org/abs/2205.11916) (May 2022) + - [Maieutic Prompting: Logically Consistent Reasoning with Recursive Explanations](https://arxiv.org/abs/2205.11822) (May 2022) + - [MRKL Systems: A modular, neuro-symbolic architecture that combines large language models, external knowledge sources and discrete reasoning](https://arxiv.org/abs/2205.00445) (May 2022) + - [PPT: Pre-trained Prompt Tuning for Few-shot Learning](https://aclanthology.org/2022.acl-long.576/) (May 2022) + - [Toxicity Detection with Generative Prompt-based Inference](https://arxiv.org/abs/2205.12390) (May 2022) + - [Learning to Transfer Prompts for Text Generation](https://arxiv.org/abs/2205.01543) (May 2022) + - [The Unreliability of Explanations in Few-shot Prompting for Textual Reasoning](https://arxiv.org/abs/2205.03401) (May 2022) + - [A Taxonomy of Prompt Modifiers for Text-To-Image Generation](https://arxiv.org/abs/2204.13988) (Apr 2022) + - [PromptChainer: Chaining Large Language Model Prompts through Visual Programming](https://arxiv.org/abs/2203.06566) (Març 2022) + - [Self-Consistency Improves Chain of Thought Reasoning in 
Language Models](https://arxiv.org/abs/2203.11171) (Març 2022) + - [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155) + - [Rethinking the Role of Demonstrations: What Makes In-Context Learning Work?](https://arxiv.org/abs/2202.12837) (Feb 2022) + - [Chain of Thought Prompting Elicits Reasoning in Large Language Models](https://arxiv.org/abs/2201.11903) (Gener 2022) + - [Show Your Work: Scratchpads for Intermediate Computation with Language Models](https://arxiv.org/abs/2112.00114) (Nov 2021) + - [AI Chains: Transparent and Controllable Human-AI Interaction by Chaining Large Language Model Prompts](https://arxiv.org/abs/2110.01691) (Oct 2021) + - [Generated Knowledge Prompting for Commonsense Reasoning](https://arxiv.org/abs/2110.08387) (Oct 2021) + - [Multitask Prompted Training Enables Zero-Shot Task Generalization](https://arxiv.org/abs/2110.08207) (Oct 2021) + - [Reframing Instructional Prompts to GPTk's Language](https://arxiv.org/abs/2109.07830) (Setembre 2021) + - [Design Guidelines for Prompt Engineering Text-to-Image Generative Models](https://arxiv.org/abs/2109.06977) (Setembre 2021) + - [Making Pre-trained Language Models Better Few-shot Learners](https://aclanthology.org/2021.acl-long.295) (Aug 2021) + - [Fantastically Ordered Prompts and Where to Find Them: Overcoming Few-Shot Prompt Order Sensitivity](https://arxiv.org/abs/2104.08786) (Abril 2021) + - [BERTese: Learning to Speak to BERT](https://aclanthology.org/2021.eacl-main.316) (Abril 2021) + - [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691) (Abril 2021) + - [Prompt Programming for Large Language Models: Beyond the Few-Shot Paradigm](https://arxiv.org/abs/2102.07350) (Feb 2021) + - [Calibrate Before Use: Improving Few-Shot Performance of Language Models](https://arxiv.org/abs/2102.09690) (Feb 2021) + - [Prefix-Tuning: Optimizing Continuous Prompts for Generation](https://arxiv.org/abs/2101.00190) 
(Gener 2021) + - [Learning to Generate Task-Specific Adapters from Task Description](https://arxiv.org/abs/2101.00420) (Gener 2021) + - [Making Pre-trained Language Models Better Few-shot Learners](https://arxiv.org/abs/2012.15723) (Desembre 2020) + - [Learning from Task Descriptions](https://aclanthology.org/2020.emnlp-main.105/) (Nov 2020) + - [AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts](https://arxiv.org/abs/2010.15980) (Oct 2020) + - [Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) (May 2020) + - [How Can We Know What Language Models Know?](https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00324/96460/How-Can-We-Know-What-Language-Models-Know) (Juliol 2020) + - [Scaling Laws for Neural Language Models](https://arxiv.org/abs/2001.08361) (Gener 2020) + +## Aplicacions + + - [Are LLMs All You Need for Task-Oriented Dialogue?](https://arxiv.org/abs/2304.06556) (Abril 2023) + - [HiPrompt: Few-Shot Biomedical Knowledge Fusion via Hierarchy-Oriented Prompting](https://arxiv.org/abs/2304.05973) (Abril 2023) + - [Approximating Human Evaluation of Social Chatbots with Prompting](https://arxiv.org/abs/2304.05253) (Abril 2023) + - [Automated Reading Passage Generation with OpenAI's Large Language Model](https://arxiv.org/abs/2304.04616) (Abril 2023) + - [WebBrain: Learning to Generate Factually Correct Articles for Queries by Grounding on Large Web Corpus](https://arxiv.org/abs/2304.04358) (Abril 2023) + - [Prompt Pre-Training with Twenty-Thousand Classes for Open-Vocabulary Visual Recognition](https://arxiv.org/abs/2304.04704) (Abril 2023) + - [GPT detectors are biased against non-native English writers](https://arxiv.org/abs/2304.02819) (Abril 2023) + - [Zero-Shot Next-Item Recommendation using Large Pretrained Language Models](https://arxiv.org/abs/2304.03153) (Abril 2023) + - [Large Language Models as Master Key: Unlocking the Secrets of Materials Science with 
GPT](https://arxiv.org/abs/2304.02213) (Abril 2023) + - [Efficiently Aligned Cross-Lingual Transfer Learning for Conversational Tasks using Prompt-Tuning](https://arxiv.org/abs/2304.01295) (Abril 2023) + - [Better Language Models of Code through Self-Improvement](https://arxiv.org/abs/2304.01228) (Abril 2023) + - [PromptORE -- A Novel Approach Towards Fully Unsupervised Relation Extraction](https://arxiv.org/abs/2304.01209) (Abril 2023) + - [Assessing Language Model Deployment with Risk Cards](https://arxiv.org/abs/2303.18190) (Abril 2023) + - [Enhancing Large Language Models with Climate Resources](https://arxiv.org/abs/2304.00116) (Març 2023) + - [BloombergGPT: A Large Language Model for Finance](https://arxiv.org/abs/2303.17564) (Març 2023) + - [Medical Intervention Duration Estimation Using Language-enhanced Transformer Encoder with Medical Prompts](https://arxiv.org/abs/2303.17408) (Març 2023) + - [Soft-prompt tuning to predict lung cancer using primary care free-text Dutch medical notes](https://arxiv.org/abs/2303.15846) (Març 2023) + - [TaskMatrix.AI: Completing Tasks by Connecting Foundation Models with Millions of APIs](https://arxiv.org/abs/2303.16434) (Març 2023) + - [Larger Probes Tell a Different Story: Extending Psycholinguistic Datasets Via In-Context Learning](https://arxiv.org/abs/2303.16445) (Març 2023) + - [Linguistically Informed ChatGPT Prompts to Enhance Japanese-Chinese Machine Translation: A Case Study on Attributive Clauses](https://arxiv.org/abs/2303.15587) (Març 2023) + - [Knowledge-augmented Frame Semantic Parsing with Hybrid Prompt-tuning](https://arxiv.org/abs/2303.14375) (Març 2023) + - [Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation](https://arxiv.org/abs/2303.15413) (Març 2023) + - [Zero-shot Model Diagnosis](https://arxiv.org/abs/2303.15441) (Març 2023) + - [Prompting Large Language Models to Generate Code-Mixed Texts: The Case of South East Asian Languages](https://arxiv.org/abs/2303.13592) (Març 2023) + - [SPeC: A Soft Prompt-Based 
Calibration on Mitigating Performance Variability in Clinical Notes Summarization](https://arxiv.org/abs/2303.13035) (Març 2023) + - [Large Language Models and Simple, Stupid Bugs](https://arxiv.org/abs/2303.11455) (Març 2023) + - [Can Generative Pre-trained Transformers (GPT) Pass Assessments in Higher Education Programming Courses?](https://arxiv.org/abs/2303.09325) (Març 2023) + - [SelfCheckGPT: Zero-Resource Black-Box Hallucination Detection for Generative Large Language Models](https://arxiv.org/abs/2303.08896) (Març 2023) + - [Large Language Models in the Workplace: A Case Study on Prompt Engineering for Job Type Classification](https://arxiv.org/abs/2303.07142) (Març 2023) + - [ICL-D3IE: In-Context Learning with Diverse Demonstrations Updating for Document Information Extraction](https://arxiv.org/abs/2303.05063) (Març 2023) + - [MathPrompter: Mathematical Reasoning using Large Language Models](https://arxiv.org/abs/2303.05398) (Març 2023) + - [Prompt-Based Learning for Thread Structure Prediction in Cybersecurity Forums](https://arxiv.org/abs/2303.05400) (Març 2023) + - [Choice Over Control: How Users Write with Large Language Models using Diegetic and Non-Diegetic Prompting](https://arxiv.org/abs/2303.03199) (Març 2023) + - [Prompting Large Language Models with Answer Heuristics for Knowledge-based Visual Question Answering](https://arxiv.org/abs/2303.01903) (Març 2023) + - [Soft Prompt Guided Joint Learning for Cross-Domain Sentiment Analysis](https://arxiv.org/abs/2303.00815) (Març 2023) + - [SpeechPrompt v2: Prompt Tuning for Speech Classification Tasks](https://arxiv.org/abs/2303.00733) (Març 2023) + - [Goal Driven Discovery of Distributional Differences via Language Descriptions](https://arxiv.org/abs/2302.14233) (Feb 2023) + - [Navigating the Grey Area: Expressions of Overconfidence and Uncertainty in Language Models](https://arxiv.org/abs/2302.13439) (Feb 2023) + - [TabGenie: A Toolkit for Table-to-Text Generation](https://arxiv.org/abs/2302.14169) 
(Feb 2023) + - [SGL-PT: A Strong Graph Learner with Graph Prompt Tuning](https://arxiv.org/abs/2302.12449) (Feb 2023) + - [Few-Shot Table-to-Text Generation with Prompt-based Adapter](https://arxiv.org/abs/2302.12468) (Feb 2023) + - [Language Models Are Few-shot Learners for Prognostic Prediction](https://arxiv.org/abs/2302.12692) (Feb 2023) + - [STA: Self-controlled Text Augmentation for Improving Text Classifications](https://arxiv.org/abs/2302.12784) (Feb 2023) + - [Check Your Facts and Try Again: Improving Large Language Models with External Knowledge and Automated Feedback](https://arxiv.org/abs/2302.12813) (Feb 2023) + - [How Generative AI models such as ChatGPT can be (Mis)Used in SPC Practice, Education, and Research? An Exploratory Study](https://arxiv.org/abs/2302.10916) (Feb 2023) + - [Grimm in Wonderland: Prompt Engineering with Midjourney to Illustrate Fairytales](https://arxiv.org/abs/2302.08961) (Feb 2023) + - [LabelPrompt: Effective Prompt-based Learning for Relation Classification](https://arxiv.org/abs/2302.08068) (Feb 2023) + - [Language Model Crossover: Variation through Few-Shot Prompting](https://arxiv.org/abs/2302.09236) (Feb 2023) + - [Prompt Tuning of Deep Neural Networks for Speaker-adaptive Visual Speech Recognition](https://arxiv.org/abs/2302.08102) (Feb 2023) + - [The Capacity for Moral Self-Correction in Large Language Models](https://arxiv.org/abs/2302.07459) (Feb 2023) + - [Prompting for Multimodal Hateful Meme Classification](https://arxiv.org/abs/2302.04156) (Feb 2023) + - [PLACES: Prompting Language Models for Social Conversation Synthesis](https://arxiv.org/abs/2302.03269) (Feb 2023) + - [Commonsense-Aware Prompting for Controllable Empathetic Dialogue Generation](https://arxiv.org/abs/2302.01441) (Feb 2023) + - [Crawling the Internal Knowledge-Base of Language Models](https://arxiv.org/abs/2301.12810) (Gener 2023) + - [Legal Prompt Engineering for Multilingual Legal Judgement Prediction](https://arxiv.org/abs/2212.02199) 
(Desembre 2022) + - [Investigating Prompt Engineering in Diffusion Models](https://arxiv.org/abs/2211.15462) (Nov 2022) + - [Learn to Explain: Multimodal Reasoning via Thought Chains for Science Question Answering](https://arxiv.org/abs/2209.09513v2) (Setembre 2022) + - [Conversing with Copilot: Exploring Prompt Engineering for Solving CS1 Problems Using Natural Language](https://arxiv.org/abs/2210.15157) (Oct 2022) + - [Piloting Copilot and Codex: Hot Temperature, Cold Prompts, or Black Magic?](https://arxiv.org/abs/2210.14699) (Oct 2022) + - [Plot Writing From Scratch Pre-Trained Language Models](https://aclanthology.org/2022.inlg-main.5) (Juliol 2022) + - [Survey of Hallucination in Natural Language Generation](https://arxiv.org/abs/2202.03629) (Feb 2022) + +## Col·leccions + + - [Chain-of-Thought Papers](https://github.com/Timothyxxx/Chain-of-ThoughtsPapers) + - [Papers with Code](https://paperswithcode.com/task/prompt-engineering) + - [Prompt Papers](https://github.com/thunlp/PromptPapers#papers) diff --git a/ca_pages/readings.ca.mdx b/ca_pages/readings.ca.mdx new file mode 100644 index 0000000..b813e96 --- /dev/null +++ b/ca_pages/readings.ca.mdx @@ -0,0 +1,118 @@ +# Lectures addicionals +#### (ordenat per nom) + +- [2023 AI Index Report](https://aiindex.stanford.edu/report/) +- [3 Principles for prompt engineering with GPT-3](https://www.linkedin.com/pulse/3-principles-prompt-engineering-gpt-3-ben-whately) +- [Eight Things to Know about Large Language Models](https://arxiv.org/pdf/2304.00612v1.pdf) +- [A beginner-friendly guide to generative language models - LaMBDA guide](https://aitestkitchen.withgoogle.com/how-lamda-works) +- [A Complete Introduction to Prompt Engineering for Large Language Models](https://www.mihaileric.com/posts/a-complete-introduction-to-prompt-engineering) +- [A Generic Framework for ChatGPT Prompt Engineering](https://medium.com/@thorbjoern.heise/a-generic-framework-for-chatgpt-prompt-engineering-7097f6513a0b) +- [An SEO’s guide to 
ChatGPT prompts](https://searchengineland.com/chatgpt-prompts-seo-393523) +- [Anyone can Design! With a little help from Generative AI](https://github.com/YashSharma/PromptEngineering) +- [AI Content Generation](https://www.jonstokes.com/p/ai-content-generation-part-1-machine) +- [AI's rise generates new job title: Prompt engineer](https://www.axios.com/2023/02/22/chatgpt-prompt-engineers-ai-job) +- [AI Safety, RLHF, and Self-Supervision - Jared Kaplan | Stanford MLSys #79](https://www.youtube.com/watch?v=fqC3D-zNJUM&ab_channel=StanfordMLSysSeminars) +- [Awesome Textual Instruction Learning Papers](https://github.com/RenzeLou/awesome-instruction-learning) +- [Awesome ChatGPT Prompts](https://github.com/f/awesome-chatgpt-prompts) +- [Best 100+ Stable Diffusion Prompts](https://mpost.io/best-100-stable-diffusion-prompts-the-most-beautiful-ai-text-to-image-prompts) +- [Best practices for prompt engineering with OpenAI API](https://help.openai.com/en/articles/6654000-best-practices-for-prompt-engineering-with-openai-api) +- [Building GPT-3 applications — beyond the prompt](https://medium.com/data-science-at-microsoft/building-gpt-3-applications-beyond-the-prompt-504140835560) +- [Can AI really be protected from text-based attacks?](https://techcrunch.com/2023/02/24/can-language-models-really-be-protected-from-text-based-attacks/) +- [ChatGPT, AI and GPT-3 Apps and use cases](https://gpt3demo.com) +- [ChatGPT Prompts](https://twitter.com/aaditsh/status/1636398208648658945?s=20) +- [ChatGPT Plugins Collection ⭐️ (unofficial)](https://github.com/logankilpatrick/ChatGPT-Plugins-Collection) +- [ChatGPT3 Prompt Engineering](https://github.com/mattnigh/ChatGPT3-Free-Prompt-List) +- [CMU Advanced NLP 2022: Prompting](https://youtube.com/watch?v=5ef83Wljm-M&feature=shares) +- [Common Sense as Dark Matter - Yejin Choi | Stanford MLSys #78](https://youtube.com/live/n4HakBqoCVg?feature=shares) +- [Create images with your words – Bing Image Creator comes to the new 
Bing](https://blogs.microsoft.com/blog/2023/03/21/create-images-with-your-words-bing-image-creator-comes-to-the-new-bing/) +- [Curtis64's set of prompt gists](https://gist.github.com/Curtis-64) +- [CS324 - Large Language Models](https://stanford-cs324.github.io/winter2022/) +- [CS 324 - Advances in Foundation Models](https://stanford-cs324.github.io/winter2023/) +- [CS224N: Natural Language Processing with Deep Learning](https://web.stanford.edu/class/cs224n/) +- [DALL·E 2 Prompt Engineering Guide](https://docs.google.com/document/d/11WlzjBT0xRpQhP9tFMtxzd0q6ANIdHPUBkMV-YB043U/edit#) +- [DALL·E 2 Preview - Risks and Limitations](https://github.com/openai/dalle-2-preview/blob/main/system-card.md) +- [DALLE Prompt Book](https://dallery.gallery/the-dalle-2-prompt-book) +- [DALL-E, Make Me Another Picasso, Please](https://www.newyorker.com/magazine/2022/07/11/dall-e-make-me-another-picasso-please?) +- [Diffusion Models: A Practical Guide](https://scale.com/guides/diffusion-models-guide) +- [Exploiting GPT-3 Prompts](https://twitter.com/goodside/status/1569128808308957185) +- [Exploring Prompt Injection Attacks](https://research.nccgroup.com/2022/12/05/exploring-prompt-injection-attacks) +- [Extrapolating to Unnatural Language Processing with GPT-3's In-context Learning: The Good, the Bad, and the Mysterious](http://ai.stanford.edu/blog/in-context-learning) +- [FVQA 2.0: Introducing Adversarial Samples into Fact-based Visual Question Answering](https://arxiv.org/pdf/2303.10699.pdf) +- [Generative AI with Cohere: Part 1 - Model Prompting](https://txt.cohere.ai/generative-ai-part-1) +- [Generative AI: Perspectives from Stanford HAI](https://hai.stanford.edu/sites/default/files/2023-03/Generative_AI_HAI_Perspectives.pdf) +- [Get a Load of This New Job: "Prompt Engineers" Who Act as Psychologists to AI Chatbots](https://futurism.com/prompt-engineers-ai) +- [Giving GPT-3 a Turing Test](https://lacker.io/ai/2020/07/06/giving-gpt-3-a-turing-test.html) +- [GPT-3 & 
Beyond](https://youtube.com/watch?v=-lnHHWRCDGk) +- [GPT3 and Prompts: A quick primer](https://buildspace.so/notes/intro-to-gpt3-prompts) +- [Hands-on with Bing’s new ChatGPT-like features](https://techcrunch.com/2023/02/08/hands-on-with-the-new-bing/) +- [How to Draw Anything](https://andys.page/posts/how-to-draw) +- [How to get images that don't suck](https://www.reddit.com/r/StableDiffusion/comments/x41n87/how_to_get_images_that_dont_suck_a) +- [How to make LLMs say true things](https://evanjconrad.com/posts/world-models) +- [How to perfect your prompt writing for AI generators](https://www.sydney.edu.au/news-opinion/news/2023/02/28/how-to-perfect-your-prompt-writing-for-ai-generators.html) +- [How to write good prompts](https://andymatuschak.org/prompts) +- [If I Was Starting Prompt Engineering in 2023: My 8 Insider Tips](https://youtube.com/watch?v=SirW7feTjh0&feature=shares) +- [Indirect Prompt Injection on Bing Chat](https://greshake.github.io/) +- [Interactive guide to GPT-3 prompt parameters](https://sevazhidkov.com/interactive-guide-to-gpt-3-prompt-parameters) +- [Introduction to ChatGPT](https://www.edx.org/course/introduction-to-chatgpt) +- [Introduction to Reinforcement Learning with Human Feedback](https://www.surgehq.ai/blog/introduction-to-reinforcement-learning-with-human-feedback-rlhf-series-part-1) +- [In defense of prompt engineering](https://simonwillison.net/2023/Feb/21/in-defense-of-prompt-engineering/) +- [JailBreaking ChatGPT: Everything You Need to Know](https://metaroids.com/learn/jailbreaking-chatgpt-everything-you-need-to-know/) +- [Language Models and Prompt Engineering: Systematic Survey of Prompting Methods in NLP](https://youtube.com/watch?v=OsbUfL8w-mo&feature=shares) +- [Language Model Behavior: A Comprehensive Survey](https://arxiv.org/abs/2303.11504) +- [Learn Prompting](https://learnprompting.org) +- [Learning Prompt](https://github.com/thinkingjimmy/Learning-Prompt) +- [Meet Claude: Anthropic’s Rival to 
ChatGPT](https://scale.com/blog/chatgpt-vs-claude) +- [Methods of prompt programming](https://generative.ink/posts/methods-of-prompt-programming) +- [Mysteries of mode collapse](https://www.lesswrong.com/posts/t9svvNPNmFf5Qa3TA/mysteries-of-mode-collapse) +- [NLP for Text-to-Image Generators: Prompt Analysis](https://heartbeat.comet.ml/nlp-for-text-to-image-generators-prompt-analysis-part-1-5076a44d8365) +- [NLP with Deep Learning CS224N/Ling284 - Lecture 11: Prompting, Instruction Tuning, and RLHF](http://web.stanford.edu/class/cs224n/slides/cs224n-2023-lecture11-prompting-rlhf.pdf) +- [Notes for Prompt Engineering by sw-yx](https://github.com/sw-yx/ai-notes) +- [On pitfalls (and advantages) of sophisticated large language models](https://arxiv.org/abs/2303.17511) +- [OpenAI Cookbook](https://github.com/openai/openai-cookbook) +- [OpenAI Prompt Examples for several applications](https://platform.openai.com/examples) +- [Pretrain, Prompt, Predict - A New Paradigm for NLP](http://pretrain.nlpedia.ai) +- [Prompt Engineer: Tech's hottest job title?](https://www.peoplematters.in/article/talent-management/is-prompt-engineering-the-hottest-job-in-ai-today-37036) +- [Prompt Engineering by Lilian Weng](https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/) +- [Prompt Engineering 101 - Introduction and resources](https://www.linkedin.com/pulse/prompt-engineering-101-introduction-resources-amatriain) +- [Prompt Engineering 101: Autocomplete, Zero-shot, One-shot, and Few-shot prompting](https://youtube.com/watch?v=v2gD8BHOaX4&feature=shares) +- [Prompt Engineering 101](https://humanloop.com/blog/prompt-engineering-101) +- [Prompt Engineering - A new profession ?](https://www.youtube.com/watch?v=w102J3_9Bcs&ab_channel=PatrickDebois) +- [Prompt Engineering by co:here](https://docs.cohere.ai/docs/prompt-engineering) +- [Prompt Engineering by Microsoft](https://microsoft.github.io/prompt-engineering) +- [Prompt Engineering: The Career of 
Future](https://shubhamsaboo111.medium.com/prompt-engineering-the-career-of-future-2fb93f90f117) +- [Prompt engineering davinci-003 on our own docs for automated support (Part I)](https://www.patterns.app/blog/2022/12/21/finetune-llm-tech-support) +- [Prompt Engineering Guide: How to Engineer the Perfect Prompts](https://richardbatt.co.uk/prompt-engineering-guide-how-to-engineer-the-perfect-prompts) +- [Prompt Engineering in GPT-3](https://www.analyticsvidhya.com/blog/2022/05/prompt-engineering-in-gpt-3) +- [Prompt Engineering Template](https://docs.google.com/spreadsheets/d/1-snKDn38-KypoYCk9XLPg799bHcNFSBAVu2HVvFEAkA/edit#gid=0) +- [Prompt Engineering Topic by GitHub](https://github.com/topics/prompt-engineering) +- [Prompt Engineering: The Ultimate Guide 2023 [GPT-3 & ChatGPT]](https://businessolution.org/prompt-engineering/) +- [Prompt Engineering: From Words to Art](https://www.saxifrage.xyz/post/prompt-engineering) +- [Prompt Engineering with OpenAI's GPT-3 and other LLMs](https://youtube.com/watch?v=BP9fi_0XTlw&feature=shares) +- [Prompt injection attacks against GPT-3](https://simonwillison.net/2022/Sep/12/prompt-injection) +- [Prompt injection to read out the secret OpenAI API key](https://twitter.com/ludwig_stumpp/status/1619701277419794435?s=20&t=GtoMlmYCSt-UmvjqJVbBSA) +- [Prompting: Better Ways of Using Language Models for NLP Tasks](https://thegradient.pub/prompting/) +- [Prompting for Few-shot Learning](https://www.cs.princeton.edu/courses/archive/fall22/cos597G/lectures/lec05.pdf) +- [Prompting in NLP: Prompt-based zero-shot learning](https://savasy-22028.medium.com/prompting-in-nlp-prompt-based-zero-shot-learning-3f34bfdb2b72) +- [Prompting Methods with Language Models and Their Applications to Weak Supervision](https://snorkel.ai/prompting-methods-with-language-models-nlp) +- [Prompts as Programming by Gwern](https://www.gwern.net/GPT-3#prompts-as-programming) +- [Prompts for communicators using the new AI-powered 
Bing](https://blogs.microsoft.com/blog/2023/03/16/prompts-for-communicators-using-the-new-ai-powered-bing/) +- [Reverse Prompt Engineering for Fun and (no) Profit](https://lspace.swyx.io/p/reverse-prompt-eng) +- [Retrieving Multimodal Information for Augmented Generation: A Survey](https://arxiv.org/pdf/2303.10868.pdf) +- [So you want to be a prompt engineer: Critical careers of the future](https://venturebeat.com/ai/so-you-want-to-be-a-prompt-engineer-critical-careers-of-the-future/) +- [Simulators](https://www.lesswrong.com/posts/vJFdjigzmcXMhNTsx/simulators) +- [Start with an Instruction](https://beta.openai.com/docs/quickstart/start-with-an-instruction) +- [Talking to machines: prompt engineering & injection](https://artifact-research.com/artificial-intelligence/talking-to-machines-prompt-engineering-injection) +- [Tech’s hottest new job: AI whisperer. No coding required](https://www.washingtonpost.com/technology/2023/02/25/prompt-engineers-techs-next-big-job/) +- [The Book - Fed Honeypot](https://fedhoneypot.notion.site/25fdbdb69e9e44c6877d79e18336fe05?v=1d2bf4143680451986fd2836a04afbf4) +- [The ChatGPT Prompt Book](https://docs.google.com/presentation/d/17b_ocq-GL5lhV_bYSShzUgxL02mtWDoiw9xEroJ5m3Q/edit#slide=id.gc6f83aa91_0_79) +- [The ChatGPT list of lists: A collection of 3000+ prompts, examples, use-cases, tools, APIs, extensions, fails and other resources](https://medium.com/mlearning-ai/the-chatgpt-list-of-lists-a-collection-of-1500-useful-mind-blowing-and-strange-use-cases-8b14c35eb) +- [The Most Important Job Skill of This Century](https://www.theatlantic.com/technology/archive/2023/02/openai-text-models-google-search-engine-bard-chatbot-chatgpt-prompt-writing/672991/) +- [The Mirror of Language](https://deepfates.com/the-mirror-of-language) +- [The Waluigi Effect (mega-post)](https://www.lesswrong.com/posts/D7PumeYTDPfBTp3i7/the-waluigi-effect-mega-post) +- [Thoughts and impressions of AI-assisted search from 
Bing](https://simonwillison.net/2023/Feb/24/impressions-of-bing/) +- [Unleash Your Creativity with Generative AI: Learn How to Build Innovative Products!](https://youtube.com/watch?v=jqTkMpziGBU&feature=shares) +- [Unlocking Creativity with Prompt Engineering](https://youtube.com/watch?v=PFsbWAC4_rk&feature=shares) +- [Using GPT-Eliezer against ChatGPT Jailbreaking](https://www.alignmentforum.org/posts/pNcFYZnPdXyL2RfgA/using-gpt-eliezer-against-chatgpt-jailbreaking) +- [What Is ChatGPT Doing … and Why Does It Work?](https://writings.stephenwolfram.com/2023/02/what-is-chatgpt-doing-and-why-does-it-work/) +- [Why is ChatGPT so good?](https://scale.com/blog/chatgpt-reinforcement-learning) +- [【徹底解説】これからのエンジニアの必携スキル、プロンプトエンジニアリングの手引「Prompt Engineering Guide」を読んでまとめてみた](https://dev.classmethod.jp/articles/how-to-design-prompt-engineering/) diff --git a/ca_pages/risks.ca.mdx b/ca_pages/risks.ca.mdx new file mode 100644 index 0000000..a8bc977 --- /dev/null +++ b/ca_pages/risks.ca.mdx @@ -0,0 +1,11 @@ +# Riscs i Mal ús + +import { Callout } from 'nextra-theme-docs' + +Ja hem vist com d'efectius poden ser els prompts ben dissenyats per a diverses tasques utilitzant tècniques com l'aprenentatge amb poques mostres i l'encadenament de pensaments. A mesura que pensem a construir aplicacions reals basades en LLMs, esdevé crucial reflexionar sobre els mals usos, riscos i pràctiques de seguretat relacionats amb els models de llenguatge. + +Aquesta secció se centra a destacar alguns dels riscos i mals usos dels LLMs mitjançant tècniques com injeccions de prompts. També destaca comportaments perjudicials i com potencialment mitigar-los mitjançant tècniques de prompts efectives. Altres temes d'interès inclouen generalitzabilitat, calibratge, biaixos, biaixos socials i factualitat, per esmentar-ne alguns. + + + Aquesta secció està sota un intens desenvolupament. 
+ \ No newline at end of file diff --git a/ca_pages/risks/_meta.ca.json b/ca_pages/risks/_meta.ca.json new file mode 100644 index 0000000..92aa587 --- /dev/null +++ b/ca_pages/risks/_meta.ca.json @@ -0,0 +1,5 @@ +{ + "adversarial": "Adversarial Prompting", + "factuality": "Factualitat", + "biases": "Biaixos" +} \ No newline at end of file diff --git a/ca_pages/style.css b/ca_pages/style.css new file mode 100644 index 0000000..452f8be --- /dev/null +++ b/ca_pages/style.css @@ -0,0 +1 @@ +pre { white-space: pre-wrap; } \ No newline at end of file diff --git a/ca_pages/techniques.ca.mdx b/ca_pages/techniques.ca.mdx new file mode 100644 index 0000000..c33270e --- /dev/null +++ b/ca_pages/techniques.ca.mdx @@ -0,0 +1,5 @@ +# Tècniques de Prompts + +Fins ara, hauria de ser evident que millorar els prompts ajuda a obtenir millors resultats en diferents tasques. Aquesta és la idea principal darrere l'enginyeria de prompts. + +Encara que els exemples bàsics eren divertits, en aquesta secció tractem tècniques d'enginyeria de prompts més avançades que ens permeten aconseguir tasques més complexes i interessants. 
\ No newline at end of file diff --git a/ca_pages/techniques/_meta.ca.json b/ca_pages/techniques/_meta.ca.json new file mode 100644 index 0000000..b019118 --- /dev/null +++ b/ca_pages/techniques/_meta.ca.json @@ -0,0 +1,13 @@ +{ + "zeroshot": "Prompt sense entrenament previ (Zero-shot)", + "fewshot": "Prompt amb poques mostres (Few-shot)", + "cot": "Prompt cadena de pensament (CoT)", + "consistency": "Autoconsistència", + "knowledge": "Prompt de coneixement generat", + "ape": "Enginyeria de prompts automàtica (APE)", + "activeprompt": "Prompt actiu", + "dsp": "Prompt d'estímul dirigit", + "react": "ReAct", + "multimodalcot": "Prompt CoT multimodal", + "graph": "Prompt de graf" +} diff --git a/ca_pages/tools.ca.mdx b/ca_pages/tools.ca.mdx new file mode 100644 index 0000000..cd74da2 --- /dev/null +++ b/ca_pages/tools.ca.mdx @@ -0,0 +1,43 @@ +# Eines i Llibreries +#### (Ordenades per nom) + +- [AI Test Kitchen](https://aitestkitchen.withgoogle.com) +- [betterprompt](https://github.com/krrishdholakia/betterprompt) +- [ChatGPT Prompt Generator](https://huggingface.co/spaces/merve/ChatGPT-prompt-generator) +- [ClickPrompt](https://github.com/prompt-engineering/click-prompt) +- [DreamStudio](https://beta.dreamstudio.ai) +- [DUST](https://dust.tt) +- [Dyno](https://trydyno.com) +- [EmergentMind](https://www.emergentmind.com) +- [EveryPrompt](https://www.everyprompt.com) +- [Guardrails](https://github.com/ShreyaR/guardrails) +- [GPT Index](https://github.com/jerryjliu/gpt_index) +- [GPTTools](https://gpttools.com/comparisontool) +- [hwchase17/adversarial-prompts](https://github.com/hwchase17/adversarial-prompts) +- [Interactive Composition Explorer](https://github.com/oughtinc/ice) +- [LangChain](https://github.com/hwchase17/langchain) +- [Lexica](https://lexica.art) +- [LMFlow](https://github.com/OptimalScale/LMFlow) +- [loom](https://github.com/socketteer/loom) +- [Metaprompt](https://metaprompt.vercel.app/?task=gpt) +- [OpenAI 
Playground](https://beta.openai.com/playground) +- [OpenICL](https://github.com/Shark-NLP/OpenICL) +- [OpenPrompt](https://github.com/thunlp/OpenPrompt) +- [OpenPlayground](https://nat.dev/) +- [Playground](https://playgroundai.com) +- [Prodia](https://app.prodia.com/#/) +- [Prompt Base](https://promptbase.com) +- [Prompt Engine](https://github.com/microsoft/prompt-engine) +- [Prompt Generator for OpenAI's DALL-E 2](http://dalle2-prompt-generator.s3-website-us-west-2.amazonaws.com) +- [Promptable](https://promptable.ai) +- [PromptInject](https://github.com/agencyenterprise/PromptInject) +- [Prompts.ai](https://github.com/sevazhidkov/prompts-ai) +- [Promptmetheus](https://promptmetheus.com) +- [PromptPerfect](https://promptperfect.jina.ai/) +- [Promptly](https://trypromptly.com/) +- [PromptSource](https://github.com/bigscience-workshop/promptsource) +- [Promptist](https://promptist.herokuapp.com/) +- [Scale SpellBook](https://scale.com/spellbook) +- [sharegpt](https://sharegpt.com) +- [ThoughtSource](https://github.com/OpenBioLink/ThoughtSource) +- [Visual Prompt Builder](https://tools.saxifrage.xyz/prompt)