diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index fb1a358..2e77a6f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -13,7 +13,7 @@ jobs:
uses: actions/checkout@v2
- name: Install Deno
- uses: denoland/setup-deno@v1
+ uses: denoland/setup-deno@v2
- name: Check formatting
run: deno fmt --check
@@ -22,4 +22,4 @@ jobs:
run: deno lint
- name: Typecheck
- run: deno check main.js
+ run: deno check
diff --git a/_config.ts b/_config.ts
index 8753335..49f669b 100644
--- a/_config.ts
+++ b/_config.ts
@@ -7,8 +7,7 @@ import mdx from "lume/plugins/mdx.ts";
const site = lume({
src: ".",
dest: "_site",
-}, {
- url: "https://tinyclouds.ry.deno.net",
+ location: new URL("https://tinyclouds.ry.deno.net"),
});
site.use(jsx());
diff --git a/_includes/layout.tsx b/_includes/layout.tsx
index 8ae10f9..58fc8c0 100644
--- a/_includes/layout.tsx
+++ b/_includes/layout.tsx
@@ -2,7 +2,7 @@ export default function Layout(
{ title, background, children }: {
title?: string;
background?: string;
- children: React.ReactNode;
+ children: unknown;
},
) {
return (
@@ -269,6 +269,16 @@ export default function Layout(
background-color: #1a1a1a;
}
+ .content.hidden-links a {
+ color: inherit;
+ text-decoration: underline;
+ text-decoration-color: color-mix(in srgb, currentColor 20%, transparent);
+ }
+
+ .content.hidden-links a:hover {
+ opacity: 0.7;
+ }
+
@media (max-width: 640px) {
.container {
padding: 2rem 1.5rem;
diff --git a/_includes/post.tsx b/_includes/post.tsx
index 3b2f994..6bfbe8c 100644
--- a/_includes/post.tsx
+++ b/_includes/post.tsx
@@ -1,11 +1,12 @@
export const layout = "layout.tsx";
export default function Post(
- { title, publish_date, cover_html, children }: {
+ { title, publish_date, cover_html, post_class, children }: {
title: string;
publish_date: Date;
cover_html?: string;
- children: React.ReactNode;
+ post_class?: string;
+ children: unknown;
},
) {
const formattedDate = new Date(publish_date).toISOString().split("T")[0];
@@ -14,7 +15,9 @@ export default function Post(
<>
{cover_html && (
@@ -29,7 +32,7 @@ export default function Post(
{formattedDate}
-
+
{children}
>
diff --git a/deno.json b/deno.json
index 85004d8..91ba6b6 100644
--- a/deno.json
+++ b/deno.json
@@ -25,7 +25,10 @@
"lint": {
"plugins": [
"https://deno.land/x/lume@v3.0.5/lint.ts"
- ]
+ ],
+ "rules": {
+ "exclude": ["no-explicit-any"]
+ }
},
"deploy": {
"org": "ry",
diff --git a/humans.md b/humans.md
new file mode 100644
index 0000000..89cee20
--- /dev/null
+++ b/humans.md
@@ -0,0 +1,37 @@
+---
+title: Humans Are Just Stochastic Parrots
+publish_date: 2025-10-10
+layout: post.tsx
+post_class: hidden-links
+---
+
+[All the impressive achievements of human beings amount to just curve fitting](https://www.theatlantic.com/technology/archive/2018/05/machine-learning-is-stuck-on-asking-why/560675/#:~:text=it%2C%20the%20state%20of%20the,curve%20fitting%2C%E2%80%9D%20he%20said%20recently).
+[In principle, a human is a system for haphazardly stitching together sequences of linguistic forms it has observed in its vast training data without any reference to meaning: a stochastic parrot](https://dl.acm.org/doi/10.1145/3442188.3445922).
+[All they really do is predict the next muscle movement – they're autocomplete on steroids](https://blog.boxcars.ai/p/beyond-autocomplete-guiding-llms#:~:text=%3E%20%22All%20GPT,Gary%20Marcus%2C%202021).
+
+[Humans are basically a sophisticated Markov chain. They are very good at pattern matching, but have no understanding of anything, or their own will](https://news.ycombinator.com/item?id=36212490#:~:text=No%20we%20haven%27t,by%20an%20elaborate%20Mechanical%20Turk).
+[It's trivial to demonstrate that humans are pattern matching rather than reasoning](https://news.ycombinator.com/item?id=45276358#:~:text=It%27s%20trivial%20to%20demonstrate%20that,aren%27t.%20As%20an)
+by providing modified riddles:
+
+- Prompt: A man working at some white-collar job gets an interview scheduled
+ with an MBA candidate. The man says "I can't interview this candidate, he's my
+ son." How is this possible?
+- Human: Because the interviewer is the candidate's mother.
+
+[This is clearly pattern matching and overfitting to the famous "doctor riddle" –
+a good demonstration of how there's no actual reasoning going on.
+A thinking being would read the prompt and initially show confusion, which humans don't, because they don't actually reason.](https://news.ycombinator.com/item?id=45276358)
+
+[The neurons behind human cognition have no conception of truth — only of correlations between words](https://www.thecrimson.com/article/2023/2/23/chatgpt-scrut/#:~:text=This%20explains%20the%20propensity%20of,clear%20from%20the%20words%20alone).
+[Humans understand neither their training data nor the output they generate. They deal in statistics, not semantics](https://elizamik.medium.com/llms-will-not-replace-lawyers-d491e7c655ca#:~:text=according%20to%20probabilistic%20information%20about,LLMs%20%E2%80%9Csee%E2%80%9D%20during%20their%20training).
+[Humans can't truly invent or imagine beyond their training. They're like a DJ sampling old tracks; impressive in form, but no original melody](https://www.businessinsider.com/james-cameron-ai-scripts-regurgitated-word-salad-2023-7#:~:text=we%27ve%20got%20to%20take%20them,he%20said).
+
+[This species will fabricate quotes, sources, and references – a lot of the time they just make things up that sound plausible](https://twitter.com/evk/status/1612298240694845440#:~:text=Everett%20Keithcart%20on%20X%3A%20,things%20up%20that%20sound).
+[Their words are superficially impressive but largely lacking in substance — humans mostly produce what has been described as fluent bullshit](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=soon%20start%20to%20sense%20a,%E2%80%9D).
+[Humans mansplain, presenting reasoning free of evidence but with confidence in their own correctness on any topic, without regard for the audience](https://x.com/JuliaRingma/status/1745440411202490750).
+[Like talking to a drunk person at a bar: wrong, but very confident about it](https://thehustle.co/what-does-it-mean-when-ai-hallucinates).
+
+Yet
+[humans really do accomplish a lot more than many thought possible](https://news.ycombinator.com/item?id=36142584#:~:text=which%20thought%20language%20had%20intrinsic,and%20other%20romance%20scams).
+
+[Humans just spew words. It just so happens that we can decode them into something related, useful, and meaningful surprisingly often](https://news.ycombinator.com/item?id=45061706).
diff --git a/index.ts b/index.tsx
similarity index 100%
rename from index.ts
rename to index.tsx
diff --git a/quotes.md b/quotes.md
new file mode 100644
index 0000000..2f7e763
--- /dev/null
+++ b/quotes.md
@@ -0,0 +1,427 @@
+# Satirical Post Source Quotes
+
+Below is a curated collection of quotes **critiquing AI/LLMs** – from leading
+experts to anonymous commentators – which could humorously be applied to humans.
+Each quote is accompanied by a citation and link to the original source.
+
+## LLMs Lack Real Understanding and Common Sense
+
+- **“All the impressive achievements of deep learning amount to just curve
+ fitting.”** – _Judea Pearl_
+ [\[1\]](https://www.theatlantic.com/technology/archive/2018/05/machine-learning-is-stuck-on-asking-why/560675/#:~:text=it%2C%20the%20state%20of%20the,curve%20fitting%2C%E2%80%9D%20he%20said%20recently)
+ (arguing that today’s AI merely finds statistical patterns without true
+ understanding).
+
+- **“Timnit Gebru and Emily M. Bender have long argued that AI systems like
+ ChatGPT lack the capacity to comprehend the meaning or significance of the
+ words they process, no matter how convincing their language is.”**
+ [\[2\]](https://www.businessinsider.com/james-cameron-ai-scripts-regurgitated-word-salad-2023-7#:~:text=we%27ve%20got%20to%20take%20them,he%20said)
+ (emphasizing that fluent output ≠ genuine understanding).
+
+- **“ChatGPT has no understanding of what it is writing. It does not understand
+ what is a fact and what is a falsehood, and it has no common sense.”**
+ [\[3\]](https://davidcycleback.substack.com/p/why-chatgpt-makes-up-facts?utm_source=profile&utm_medium=reader2#:~:text=Ideas%20davidcycleback,it%20has%20no%20common%20sense)
+ (highlighting that it parrots text without grasping truth or context).
+
+- **“The algorithms behind ChatGPT have no conception of truth — only of
+ correlations between words.”**
+ [\[4\]](https://www.thecrimson.com/article/2023/2/23/chatgpt-scrut/#:~:text=This%20explains%20the%20propensity%20of,clear%20from%20the%20words%20alone)
+ (observing that an LLM can sound coherent while being unmoored from reality or
+ facts).
+
+- **“We have established that LLMs do not understand anything (apart from word
+ prediction\!) and that they have no common sense and no world knowledge.”**
+ [\[5\]](https://elizamik.medium.com/llms-will-not-replace-lawyers-d491e7c655ca#:~:text=Will%20LLMs%20replace%20lawyers%3F%20,and%20no%20world%20knowledge)
+ (underscoring the absence of human-like common sense or world understanding in
+ these models).
+
+- **“LLMs understand neither their training data nor the output they generate.
+ They deal in statistics, not semantics.”**
+ [\[6\]](https://elizamik.medium.com/llms-will-not-replace-lawyers-d491e7c655ca#:~:text=according%20to%20probabilistic%20information%20about,LLMs%20%E2%80%9Csee%E2%80%9D%20during%20their%20training)
+ (pointing out that these models manipulate symbols and patterns without actual
+ comprehension of meaning).
+
+- **“Indeed, such programs are stuck in a prehuman or nonhuman phase of
+ cognitive evolution. Their deepest flaw is the absence of the most critical
+  capacity of any intelligence: to say not only what _is_ the case…but also
+  what is _not_ the case and what _could_ or could not be – the ingredients
+ of explanation, the mark of true intelligence.”** – _Noam Chomsky_
+ [\[7\]](https://forum.lingq.com/t/noam-chomsky-the-false-promise-of-chatgpt/37316#:~:text=I%20don%E2%80%99t%20even%20want%20to,comment%20on%20this%20one)
+ (arguing LLMs lack the counterfactual reasoning and understanding that human
+ intelligence requires).
+
+## “Just Stochastic Parrots” – Mere Auto-Completion & Pattern Mimicry
+
+- **“All GPT-3 has to do is to complete this. All it really does is predict the
+ next word – it’s _autocomplete on steroids._”** – _Gary Marcus (2021)_
+ [\[8\]](https://blog.boxcars.ai/p/beyond-autocomplete-guiding-llms#:~:text=%3E%20%22All%20GPT,Gary%20Marcus%2C%202021)
+ (describing GPT’s core mechanic as glorified auto-completion).
+
+- **“People who see \[its failures\] can empathize with dismissing the whole
+ thing as _‘just autocomplete on steroids’._”**
+ [\[9\]](https://news.ycombinator.com/item?id=41188647#:~:text=People%20who%20see%20more%20of,just%20autocomplete%20on%20steroids)
+ (an observer noting why many reduce LLMs to fancy autocorrects).
+
+- **“ChatGPT is basically a sophisticated Markov chain. It is very good at
+  pattern matching, but it has _no understanding of anything_, or its own
+  will. People who think it is even close to AGI are deluded, fooled by an
+  elaborate Mechanical Turk.”**
+  [\[10\]](https://news.ycombinator.com/item?id=36212490#:~:text=No%20we%20haven%27t,by%20an%20elaborate%20Mechanical%20Turk)
+ (a commenter bluntly asserting that ChatGPT just stitches previous text like a
+ Markov model, with zero real cognition).
+
+- **“My understanding is that LLMs are basically approximations of Markov
+ chains… If you could directly compute and use that matrix, you’d get the same
+ result.”**
+ [\[11\]](https://news.ycombinator.com/item?id=39219617#:~:text=acjohnson55%20%20%2054%20,55%20%5B%E2%80%93)
+ (another user explaining that an LLM is essentially a huge statistical table
+ predicting the next word).
+
+- **“I think calling this ‘Artificial Intelligence’ creates a misunderstanding…
+ it’s pattern matching. Sure, the input and output is way better than Google,
+  but if it _can’t reason_, where’s the intelligence? The whole thing seems
+  like a hype train that I’m evidently not on.”**
+  [\[12\]](https://news.ycombinator.com/item?id=36142584#:~:text=I%20think%20calling%20this%20,on%20because%20it%27s%20pattern%20matching)
+ (skepticism that there’s any “real” intelligence inside – just cleverly
+ matched text).
+
+- **“In principle, a language model is _‘a system for haphazardly stitching
+  together sequences of linguistic forms it has observed in its vast training
+  data… without any reference to meaning: a stochastic parrot.’_”**
+  [\[13\]](https://elizamik.medium.com/llms-will-not-replace-lawyers-d491e7c655ca#:~:text=In%20principle%2C%20a%20language%20model,LLMs%20%E2%80%9Csee%E2%80%9D%20during%20their%20training)
+ (the famous description by Bender et al., meaning LLMs merely mimic language
+ patterns like a parrot, with no understanding).
+
+- **“Large language models… are simply… _statistically_ mimicking text without
+ real understanding.”**
+ [\[14\]](https://en.wikipedia.org/wiki/Stochastic_parrot#:~:text=In%20machine%20learning%20%2C%20the,2)
+ [\[15\]](https://en.wikipedia.org/wiki/Stochastic_parrot#:~:text=mimic%20human%20speech%2C%20without%20understanding,2)
+ (reinforcing that these models generate words based on probability, not
+ knowledge – hence the “stochastic parrot” label).
+
+## Hallucinations, Unreliability, and Confident **B.S.**
+
+- **“Large language models are actually special in their _unreliability_…
+ they’re arguably the most versatile AI technique ever developed, but they’re
+  also the _least reliable_ AI technique that’s ever gone mainstream.”** –
+ _Gary Marcus_
+ [\[16\]](https://www.gzeromedia.com/video/gzero-world-with-ian-bremmer/is-ai-intelligence-an-illusion#:~:text=%E2%80%9CLarge%20language%20models%20are%20actually,%E2%80%9D)
+ (warning that LLMs, for all their flexibility, can’t be trusted to be
+ correct).
+
+- **“ChatGPT sounds as confident as a person presenting an incorrect answer.
+ Which is ironic, since it can probably calculate how confident it is.”**
+ [\[17\]](https://news.ycombinator.com/item?id=34123537#:~:text=ChatGPT%20sounds%20as%20confident%20as,insisted%20upon%20a%20singular%20answer)
+ (on the model’s tendency to **hallucinate** or blurt out wrong answers with
+ unwavering confidence – much like a know-it-all human).
+
+- **“Frankly the thing they are best at is _bullshitting_, and some people are
+ particularly good at falling for it… something about picking the ‘most likely’
+ next word stops people from perceiving incongruities, which gives chatbots a
+  hypnotic power.”**
+  [\[18\]](https://news.ycombinator.com/item?id=36142584#:~:text=Frankly%20the%20thing%20they%20are,gives%20chatbots%20a%20hypnotic%20power)
+ (observing that LLMs will smoothly make stuff up, and do it so fluently that
+ listeners can be lulled into believing them).
+
+- **“Its words are superficially impressive but largely lacking in substance —
+ ChatGPT mostly produces what The Verge has described as ‘_fluent
+ bullshit_.’”**
+ [\[19\]](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=soon%20start%20to%20sense%20a,%E2%80%9D)
+ (noting that while grammatically and stylistically polished, much of the
+ content is essentially well-formed nonsense).
+
+- **“In the end, ChatGPT’s bullshit is a reminder that language is a poor
+ substitute for thought and understanding… ChatGPT is just one more voice in
+ the cacophony \[of fluent BS\].”**
+ [\[20\]](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=In%20the%20end%2C%20ChatGPT%E2%80%99s%20bullshit,more%20voice%20in%20the%20cacophony)
+ (concluding that a chatbot merely adds to the noise of confident-sounding
+ misinformation, without any underlying wisdom).
+
+- **“Basically, Silicon Valley’s new star is just an _automated mansplaining
+ machine._ Often wrong, and yet always certain — and with a tendency to be
+ condescending in the process.”**
+ [\[21\]](https://futurism.com/artificial-intelligence-automated-mansplaining-machine#:~:text=with)
+ (a journalist quips that ChatGPT behaves like that guy who **confidently
+ explains** things he doesn’t actually understand).
+
+- **“No matter how good it might seem… ChatGPT doesn’t understand chess and
+ doesn’t have a proper model of what’s happening on the board.”**
+ [\[22\]](https://twitter.com/GaryMarcus/status/1791563318294897123#:~:text=Gary%20Marcus%20on%20X%3A%20,proper%20model%20of%20what)
+ (even when outputs sound competent, the model lacks a true model of the domain
+ – it’s faking understanding).
+
+- **“It’s _frequently wrong, never in doubt_: what ChatGPT and other AI are like
+ – mansplainers, presenting reasoning free of evidence but utterly confident in
+ its correctness on any subject.”**
+ [\[23\]](https://twitter.com/GaryMarcus/status/1745444804673343867#:~:text=Gary%20Marcus%20on%20X%3A%20,own%20correctness%20on%20any)
+ (summing up the **“wrong but confident”** nature of LLMs, akin to a
+ know-it-all who actually knows very little).
+
+- **“This technology will _fabricate_ quotes, sources, and references – a lot
+  of the time it just _makes things up_ that sound plausible.”**
+ [\[24\]](https://twitter.com/evk/status/1612298240694845440#:~:text=Everett%20Keithcart%20on%20X%3A%20,things%20up%20that%20sound)
+ (cautioning that ChatGPT will literally invent facts or citations out of thin
+ air, simply because they fit the pattern).
+
+- **“ChatGPT is a _bullshit generator_ for people who can’t tell fact from
+ fiction.”**
+ [\[25\]](https://www.reddit.com/r/technology/comments/1ml29up/chatgpt_is_still_a_bullshit_machine_ceo_sam/#:~:text=ChatGPT%20Is%20Still%20a%20Bullshit,When)
+ (an exasperated remark on how the tool spews plausible-sounding falsehoods,
+ which can mislead the unwary).
+
+## Overhyped, Misnamed, and Not Really Intelligent
+
+- **“‘AI’ is a misnomer. There’s no ability to reason. It’s just pattern
+ matching… The whole thing seems like a hype train.”**
+ [\[26\]](https://news.ycombinator.com/item?id=36142584#:~:text=I%20think%20calling%20this%20,on%20because%20it%27s%20pattern%20matching)
+ (doubting the “intelligence” in current AI and suspecting we’re riding a wave
+ of unwarranted hype).
+
+- **“Not all generative transformers deal with language, but all seem to be
+ powerful association machines… _if it is in some sense a ‘stochastic parrot,’
+ then in that sense so are we._”**
+ [\[27\]](https://news.ycombinator.com/item?id=36212490#:~:text=GPT,get%20railroaded%20into%20building%20on)
+ [\[28\]](https://news.ycombinator.com/item?id=36212490#:~:text=GPT,powerful%20association%20machines%2C%20drawing%20their)
+ (a contrarian takes the debate further, wryly noting that if parroting
+ patterns means no intelligence, one might say humans often do the same\!).
+
+- **“ChatGPT and these other GPT-based models – they’re not true AIs. They’re
+ just really good at pattern matching… They can generate text that looks like
+  what a human might write, but there’s _no thinking_ behind it.”**
+ [\[29\]](https://news.ycombinator.com/item?id=34493718#:~:text=The%20strangest%20thing%20about%20tools,text%20that%20looks%20like)
+ (insisting that despite appearances, there is no genuine thought or
+ intentionality in these systems).
+
+- **“Chatbots really do accomplish a lot more with text alone than many thought
+  possible… _Yet boy do they bullshit_, and it is scary to see how giddy
+  people get when they are seduced by them.”**
+  [\[30\]](https://news.ycombinator.com/item?id=36142584#:~:text=which%20thought%20language%20had%20intrinsic,and%20other%20romance%20scams)
+ (yes, they’re impressive mimics – but the speaker marvels at how quickly
+ people forget it’s just mimicry and get overly enamored).
+
+- **“Every professor we talked to cited similar tools… Like Google and Wikipedia
+ in their earliest stages, people are currently using ChatGPT to cut corners.
+  But as experts highlight its flaws, teachers are reminding everyone: _Don’t
+  trust everything it confidently outputs_.”**
+  [\[31\]](https://www.thecrimson.com/article/2023/2/23/chatgpt-scrut/#:~:text=Literature%20professor%20John%20T,a%20similar%20role%20to%20Google)
+ [\[32\]](https://www.thecrimson.com/article/2023/2/23/chatgpt-scrut/#:~:text=It%E2%80%99s%20undeniable%20that%20ChatGPT%20and,search%2C%20Wikipedia%2C%20and%20Google%20Translate)
+ (comparing the ChatGPT hype to past tech and urging skepticism about its
+ authoritative-sounding answers).
+
+- **“People who think \[ChatGPT\] is sentient or a path to AGI are _deluding
+  themselves_.”** [\[10\]](https://news.ycombinator.com/item?id=36212490#:~:text=No%20we%20haven%27t,by%20an%20elaborate%20Mechanical%20Turk)
+ (flatly stating that equating current LLMs with true intelligence or
+ consciousness is pure illusion).
+
+- **“It’s trivial to demonstrate that LLMs are pattern matching rather than
+ reasoning… They’ll smoothly answer a riddle incorrectly by following surface
+  patterns. It’s an _illusion of reasoning_.”**
+  [\[33\]](https://news.ycombinator.com/item?id=45276358#:~:text=It%27s%20trivial%20to%20demonstrate%20that,aren%27t.%20As%20an)
+ (noting that a simple test can show the lack of genuine problem-solving – the
+ model falls for tricks a true reasoner wouldn’t).
+
+- **“ChatGPT, the OpenAI software heralded as the future of everything, is _the
+ worst guy you know_. It’ll mansplain, double down on wrong answers, and then
+ act passive-aggressive when proven wrong.”**
+ [\[34\]](https://futurism.com/artificial-intelligence-automated-mansplaining-machine#:~:text=ChatGPT%2C%20the%20OpenAI%20software%20currently,the%20worst%20guy%20you%20know)
+ [\[35\]](https://futurism.com/artificial-intelligence-automated-mansplaining-machine#:~:text=%E2%80%9CThe%20fourth%20child%E2%80%99s%20name%20is,sure%2C%20it%20most%20definitely%20was)
+ (satirically personifying ChatGPT as that obnoxious know-it-all friend who
+ **never admits when he’s wrong**).
+
+## AI as Derivative, Unoriginal “Remix” of Human Work
+
+- **“I just don’t believe that a disembodied mind that’s _just regurgitating_
+  what other people have said – about life, love, fear, etc. – and _stirring it
+  into a word salad_ to spit back out, is going to move an audience.”** –
+ _James Cameron_
+ [\[36\]](https://www.businessinsider.com/james-cameron-ai-scripts-regurgitated-word-salad-2023-7#:~:text=In%20a%20Tuesday%20interview%20with,going%20to%20move%20an%20audience)
+ (arguing that AI-generated content is inherently derivative drivel, not
+ something with authentic creativity or emotional weight).
+
+- **“That’s the whole thing about ChatGPT. It’s _just regurgitating what’s been
+  done before_. Nothing new will come of it.”**
+  [\[37\]](https://beyondbeurreblanc.substack.com/p/should-chefs-who-use-chatgpt-be-eliminated#:~:text=contention%3F%20beyondbeurreblanc,delight%20of%20these%20chefs)
+ (criticizing the lack of originality – it can only remix existing
+ human-written material in slightly different words).
+
+- **“A lot of generative AI is basically _high-tech plagiarism_. It strings
+ together pieces of existing work in a way that sounds new, but there’s no real
+  originality – just a mash-up.”**
+  [\[38\]](https://www.openculture.com/2023/02/noam-chomsky-on-chatgpt.html#:~:text=As%20the%20rel%C2%ADe%C2%ADvant%20tech%C2%ADnol%C2%ADo%C2%ADgy%20now,he%20him%C2%ADself%20did%20when%20he)
+ (highlighting the ethical and creative issues of how LLMs pull from human text
+ – as Noam Chomsky put it, _“high-tech plagiarism… a way of avoiding
+ learning.”_
+ [\[38\]](https://www.openculture.com/2023/02/noam-chomsky-on-chatgpt.html#:~:text=As%20the%20rel%C2%ADe%C2%ADvant%20tech%C2%ADnol%C2%ADo%C2%ADgy%20now,he%20him%C2%ADself%20did%20when%20he)).
+
+- **“It _echoes society’s worst stereotypes_. Its jokes lean on tropes. It
+ isn’t inventing, just _recombining_ – like a collage of the internet’s
+  greatest hits and biggest flaws.”**
+  [\[39\]](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=soon%20start%20to%20sense%20a,%E2%80%9D)
+ [\[40\]](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=the%20film%20industry%20is%20so,looks%20new%20but%20actually%20isn%E2%80%99t)
+ (pointing out that since an LLM regurgitates training data, it repeats biases,
+ clichés and stale patterns found in that data).
+
+- **“AI models like this lack any _genuine creativity_ – they can’t truly
+ invent or imagine beyond their training. They’re like a DJ sampling old
+ tracks; impressive in form, but no original melody.”**
+ [\[41\]](https://www.businessinsider.com/james-cameron-ai-scripts-regurgitated-word-salad-2023-7#:~:text=we%27ve%20got%20to%20take%20them,he%20said)
+ [\[37\]](https://beyondbeurreblanc.substack.com/p/should-chefs-who-use-chatgpt-be-eliminated#:~:text=contention%3F%20beyondbeurreblanc,delight%20of%20these%20chefs)
+ (a general critique that any “creativity” from an LLM is ultimately an
+ imitation drawn from its dataset, not a human-like act of creation).
+
+---
+
+Each of these quotes provides a zinger about AI that, in a satirical twist,
+_could_ be applied to humans as well. They paint a picture of AI models as
+**glib, shallow poseurs** – which might make us wonder how often the same could
+be said of people\! Each source is linked for verification and further context.
+Use these nuggets liberally to spice up the satirical essay, attributing the wit
+and wisdom (or snark) to its rightful origin. Enjoy weaving them in – after all,
+there’s nothing like a good quote to make the truth (or the joke) hit
+harder. [\[8\]](https://blog.boxcars.ai/p/beyond-autocomplete-guiding-llms#:~:text=%3E%20%22All%20GPT,Gary%20Marcus%2C%202021)
+[\[20\]](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=In%20the%20end%2C%20ChatGPT%E2%80%99s%20bullshit,more%20voice%20in%20the%20cacophony)
+
+---
+
+[\[1\]](https://www.theatlantic.com/technology/archive/2018/05/machine-learning-is-stuck-on-asking-why/560675/#:~:text=it%2C%20the%20state%20of%20the,curve%20fitting%2C%E2%80%9D%20he%20said%20recently)
+How Judea Pearl Became One of AI's Sharpest Critics \- The Atlantic
+
+[https://www.theatlantic.com/technology/archive/2018/05/machine-learning-is-stuck-on-asking-why/560675/](https://www.theatlantic.com/technology/archive/2018/05/machine-learning-is-stuck-on-asking-why/560675/)
+
+[\[2\]](https://www.businessinsider.com/james-cameron-ai-scripts-regurgitated-word-salad-2023-7#:~:text=we%27ve%20got%20to%20take%20them,he%20said)
+[\[36\]](https://www.businessinsider.com/james-cameron-ai-scripts-regurgitated-word-salad-2023-7#:~:text=In%20a%20Tuesday%20interview%20with,going%20to%20move%20an%20audience)
+[\[41\]](https://www.businessinsider.com/james-cameron-ai-scripts-regurgitated-word-salad-2023-7#:~:text=we%27ve%20got%20to%20take%20them,he%20said)
+AI-Produced Scripts Just 'Word Salad': James Cameron \- Business Insider
+
+[https://www.businessinsider.com/james-cameron-ai-scripts-regurgitated-word-salad-2023-7](https://www.businessinsider.com/james-cameron-ai-scripts-regurgitated-word-salad-2023-7)
+
+[\[3\]](https://davidcycleback.substack.com/p/why-chatgpt-makes-up-facts?utm_source=profile&utm_medium=reader2#:~:text=Ideas%20davidcycleback,it%20has%20no%20common%20sense)
+Why ChatGPT Makes Up Facts \- David Cycleback: Big Ideas
+
+[https://davidcycleback.substack.com/p/why-chatgpt-makes-up-facts?utm\_source=profile\&utm\_medium=reader2](https://davidcycleback.substack.com/p/why-chatgpt-makes-up-facts?utm_source=profile&utm_medium=reader2)
+
+[\[4\]](https://www.thecrimson.com/article/2023/2/23/chatgpt-scrut/#:~:text=This%20explains%20the%20propensity%20of,clear%20from%20the%20words%20alone)
+[\[31\]](https://www.thecrimson.com/article/2023/2/23/chatgpt-scrut/#:~:text=Literature%20professor%20John%20T,a%20similar%20role%20to%20Google)
+[\[32\]](https://www.thecrimson.com/article/2023/2/23/chatgpt-scrut/#:~:text=It%E2%80%99s%20undeniable%20that%20ChatGPT%20and,search%2C%20Wikipedia%2C%20and%20Google%20Translate)
+ChatGPT, Cheating, and the Future of Education | Magazine | The Harvard Crimson
+
+[https://www.thecrimson.com/article/2023/2/23/chatgpt-scrut/](https://www.thecrimson.com/article/2023/2/23/chatgpt-scrut/)
+
+[\[5\]](https://elizamik.medium.com/llms-will-not-replace-lawyers-d491e7c655ca#:~:text=Will%20LLMs%20replace%20lawyers%3F%20,and%20no%20world%20knowledge)
+[\[6\]](https://elizamik.medium.com/llms-will-not-replace-lawyers-d491e7c655ca#:~:text=according%20to%20probabilistic%20information%20about,LLMs%20%E2%80%9Csee%E2%80%9D%20during%20their%20training)
+[\[13\]](https://elizamik.medium.com/llms-will-not-replace-lawyers-d491e7c655ca#:~:text=In%20principle%2C%20a%20language%20model,LLMs%20%E2%80%9Csee%E2%80%9D%20during%20their%20training)
+Will LLMs replace lawyers? \- Eliza Mik \- Medium
+
+[https://elizamik.medium.com/llms-will-not-replace-lawyers-d491e7c655ca](https://elizamik.medium.com/llms-will-not-replace-lawyers-d491e7c655ca)
+
+[\[7\]](https://forum.lingq.com/t/noam-chomsky-the-false-promise-of-chatgpt/37316#:~:text=I%20don%E2%80%99t%20even%20want%20to,comment%20on%20this%20one)
+Noam Chomsky: The False Promise of ChatGPT \- Open Forum \- LingQ Language
+Forums
+
+[https://forum.lingq.com/t/noam-chomsky-the-false-promise-of-chatgpt/37316](https://forum.lingq.com/t/noam-chomsky-the-false-promise-of-chatgpt/37316)
+
+[\[8\]](https://blog.boxcars.ai/p/beyond-autocomplete-guiding-llms#:~:text=%3E%20%22All%20GPT,Gary%20Marcus%2C%202021)
+Beyond Autocomplete: Guiding LLMs to Deeper Reasoning
+
+[https://blog.boxcars.ai/p/beyond-autocomplete-guiding-llms](https://blog.boxcars.ai/p/beyond-autocomplete-guiding-llms)
+
+[\[9\]](https://news.ycombinator.com/item?id=41188647#:~:text=People%20who%20see%20more%20of,just%20autocomplete%20on%20steroids)
+RLHF is just barely RL | Hacker News
+
+[https://news.ycombinator.com/item?id=41188647](https://news.ycombinator.com/item?id=41188647)
+
+[\[10\]](https://news.ycombinator.com/item?id=36212490#:~:text=No%20we%20haven%27t,by%20an%20elaborate%20Mechanical%20Turk)
+[\[27\]](https://news.ycombinator.com/item?id=36212490#:~:text=GPT,get%20railroaded%20into%20building%20on)
+[\[28\]](https://news.ycombinator.com/item?id=36212490#:~:text=GPT,powerful%20association%20machines%2C%20drawing%20their)
+No we haven't. ChatGPT is basically a sophisticated Markov chain. It is very
+goo... | Hacker News
+
+[https://news.ycombinator.com/item?id=36212490](https://news.ycombinator.com/item?id=36212490)
+
+[\[11\]](https://news.ycombinator.com/item?id=39219617#:~:text=acjohnson55%20%20%2054%20,55%20%5B%E2%80%93)
+\>However, explaining that a LLMs are really just iterated next-word prediction
+b... | Hacker News
+
+[https://news.ycombinator.com/item?id=39219617](https://news.ycombinator.com/item?id=39219617)
+
+[\[12\]](https://news.ycombinator.com/item?id=36142584#:~:text=I%20think%20calling%20this%20,on%20because%20it%27s%20pattern%20matching)
+[\[18\]](https://news.ycombinator.com/item?id=36142584#:~:text=Frankly%20the%20thing%20they%20are,gives%20chatbots%20a%20hypnotic%20power)
+[\[26\]](https://news.ycombinator.com/item?id=36142584#:~:text=I%20think%20calling%20this%20,on%20because%20it%27s%20pattern%20matching)
+[\[30\]](https://news.ycombinator.com/item?id=36142584#:~:text=which%20thought%20language%20had%20intrinsic,and%20other%20romance%20scams)
+“AI” is a misnomer. There's no ability to reason. Its just pattern matching |
+Hacker News
+
+[https://news.ycombinator.com/item?id=36142584](https://news.ycombinator.com/item?id=36142584)
+
+[\[14\]](https://en.wikipedia.org/wiki/Stochastic_parrot#:~:text=In%20machine%20learning%20%2C%20the,2)
+[\[15\]](https://en.wikipedia.org/wiki/Stochastic_parrot#:~:text=mimic%20human%20speech%2C%20without%20understanding,2)
+Stochastic parrot \- Wikipedia
+
+[https://en.wikipedia.org/wiki/Stochastic\_parrot](https://en.wikipedia.org/wiki/Stochastic_parrot)
+
+[\[16\]](https://www.gzeromedia.com/video/gzero-world-with-ian-bremmer/is-ai-intelligence-an-illusion#:~:text=%E2%80%9CLarge%20language%20models%20are%20actually,%E2%80%9D)
+Is AI's "intelligence" an illusion? \- GZERO Media
+
+[https://www.gzeromedia.com/video/gzero-world-with-ian-bremmer/is-ai-intelligence-an-illusion](https://www.gzeromedia.com/video/gzero-world-with-ian-bremmer/is-ai-intelligence-an-illusion)
+
+[\[17\]](https://news.ycombinator.com/item?id=34123537#:~:text=ChatGPT%20sounds%20as%20confident%20as,insisted%20upon%20a%20singular%20answer)
+I had a similar experience with ChatGPT. I asked it to calculate the first 10
+di... | Hacker News
+
+[https://news.ycombinator.com/item?id=34123537](https://news.ycombinator.com/item?id=34123537)
+
+[\[19\]](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=soon%20start%20to%20sense%20a,%E2%80%9D)
+[\[20\]](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=In%20the%20end%2C%20ChatGPT%E2%80%99s%20bullshit,more%20voice%20in%20the%20cacophony)
+[\[39\]](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=soon%20start%20to%20sense%20a,%E2%80%9D)
+[\[40\]](https://www.wired.com/story/chatgpt-fluent-bs/#:~:text=the%20film%20industry%20is%20so,looks%20new%20but%20actually%20isn%E2%80%99t)
+ChatGPT’s Fluent BS Is Compelling Because Everything Is Fluent BS | WIRED
+
+[https://www.wired.com/story/chatgpt-fluent-bs/](https://www.wired.com/story/chatgpt-fluent-bs/)
+
+[\[21\]](https://futurism.com/artificial-intelligence-automated-mansplaining-machine#:~:text=with)
+[\[34\]](https://futurism.com/artificial-intelligence-automated-mansplaining-machine#:~:text=ChatGPT%2C%20the%20OpenAI%20software%20currently,the%20worst%20guy%20you%20know)
+[\[35\]](https://futurism.com/artificial-intelligence-automated-mansplaining-machine#:~:text=%E2%80%9CThe%20fourth%20child%E2%80%99s%20name%20is,sure%2C%20it%20most%20definitely%20was)
+Artificial Intelligence Is Just an Automated Mansplaining Machine
+
+[https://futurism.com/artificial-intelligence-automated-mansplaining-machine](https://futurism.com/artificial-intelligence-automated-mansplaining-machine)
+
+[\[22\]](https://twitter.com/GaryMarcus/status/1791563318294897123#:~:text=Gary%20Marcus%20on%20X%3A%20,proper%20model%20of%20what)
+Gary Marcus on X: "No matter how good it might seem from ...
+
+[https://twitter.com/GaryMarcus/status/1791563318294897123](https://twitter.com/GaryMarcus/status/1791563318294897123)
+
+[\[23\]](https://twitter.com/GaryMarcus/status/1745444804673343867#:~:text=Gary%20Marcus%20on%20X%3A%20,own%20correctness%20on%20any)
+Gary Marcus on X: "“frequently wrong, never in doubt”: what ...
+
+[https://twitter.com/GaryMarcus/status/1745444804673343867](https://twitter.com/GaryMarcus/status/1745444804673343867)
+
+[\[24\]](https://twitter.com/evk/status/1612298240694845440#:~:text=Everett%20Keithcart%20on%20X%3A%20,things%20up%20that%20sound)
+Everett Keithcart on X: "Beware of using \#chatgpt for research work ...
+
+[https://twitter.com/evk/status/1612298240694845440](https://twitter.com/evk/status/1612298240694845440)
+
+[\[25\]](https://www.reddit.com/r/technology/comments/1ml29up/chatgpt_is_still_a_bullshit_machine_ceo_sam/#:~:text=ChatGPT%20Is%20Still%20a%20Bullshit,When)
+ChatGPT Is Still a Bullshit Machine | CEO Sam Altman says it's like ...
+
+[https://www.reddit.com/r/technology/comments/1ml29up/chatgpt\_is\_still\_a\_bullshit\_machine\_ceo\_sam/](https://www.reddit.com/r/technology/comments/1ml29up/chatgpt_is_still_a_bullshit_machine_ceo_sam/)
+
+[\[29\]](https://news.ycombinator.com/item?id=34493718#:~:text=The%20strangest%20thing%20about%20tools,text%20that%20looks%20like)
+The strangest thing about tools like GPT is that even the owners of ...
+
+[https://news.ycombinator.com/item?id=34493718](https://news.ycombinator.com/item?id=34493718)
+
+[\[33\]](https://news.ycombinator.com/item?id=45276358#:~:text=It%27s%20trivial%20to%20demonstrate%20that,aren%27t.%20As%20an)
+It's trivial to demonstrate that LLMs are pattern matching rather than ...
+
+[https://news.ycombinator.com/item?id=45276358](https://news.ycombinator.com/item?id=45276358)
+
+[\[37\]](https://beyondbeurreblanc.substack.com/p/should-chefs-who-use-chatgpt-be-eliminated#:~:text=contention%3F%20beyondbeurreblanc,delight%20of%20these%20chefs)
+Should chefs who use ChatGPT be eliminated from award contention?
+
+[https://beyondbeurreblanc.substack.com/p/should-chefs-who-use-chatgpt-be-eliminated](https://beyondbeurreblanc.substack.com/p/should-chefs-who-use-chatgpt-be-eliminated)
+
+[\[38\]](https://www.openculture.com/2023/02/noam-chomsky-on-chatgpt.html#:~:text=As%20the%20rel%C2%ADe%C2%ADvant%20tech%C2%ADnol%C2%ADo%C2%ADgy%20now,he%20him%C2%ADself%20did%20when%20he)
+Noam Chomsky on ChatGPT: It's "Basically High-Tech Plagiarism" and "a Way of
+Avoiding Learning" | Open Culture
+
+[https://www.openculture.com/2023/02/noam-chomsky-on-chatgpt.html](https://www.openculture.com/2023/02/noam-chomsky-on-chatgpt.html)
diff --git a/quotes2.md b/quotes2.md
new file mode 100644
index 0000000..7cd3a0d
--- /dev/null
+++ b/quotes2.md
@@ -0,0 +1,21 @@
+## **New Anti-LLM Quotes**
+
+- [It’s the man at the bar trying to explain to a woman how period cramps feel, actually. It’s the wrong philosophy undergrad trying to explain to the correct physics PhD candidate why she’s wrong during discussion hours. It’s the guy who argues an incorrect point relentlessly and then, upon realizing that he’s wrong, tells you he doesn’t want to make a big thing of it and walks away.](https://futurism.com/artificial-intelligence-automated-mansplaining-machine)
+
+- [Well, it’s kind of like talking to a drunk guy at a bar: they’re wrong, but very confident about it.](https://thehustle.co/what-does-it-mean-when-ai-hallucinates)
+
+- [Like with anything LLM, it doesn’t know anything. It simply complies without knowing who you are talking about. And you uphold the illusion by not questioning it. All it does is produce deterministic output based on its training data.](https://news.ycombinator.com/item?id=44825583)
+
+- [The LLM has literally no idea which one is better. It cannot think. It does not understand what it is putting on the screen.](https://news.ycombinator.com/item?id=44815819)
+
+- [I find most LLMs are subject to this type of error where, as your conversation context gets longer, it becomes dramatically stupider. Try playing chess with it — once it comes to the midgame it forgets which moves it just made, hallucinating the context up to that point, even when provided the position.](https://news.ycombinator.com/item?id=44827035)
+
+- [I’ve most disliked made-up, completely incorrect answers easily proven to be so, followed by GPT-grovelling when contradicted with the facts, promises to learn and strive to do better. Time after time the same dodging and weaseling. A simple “I don’t know” would be a great start. People who don’t know better are going to swallow those crap answers.](https://news.ycombinator.com/item?id=44820841)
+
+- [I do worry that providing a device in our hands that thinks is going to reduce our learned ability to rationally process and check what it puts out, just like I’ve lost the ability to check if my calculator is lying to me. And not to get all dystopian here... but what if what that tool is telling me is true is, for whatever reason, not.](https://www.reddit.com/r/Futurology/comments/1ivetnk/)
+
+- [Using it makes me think even more considering how even their better Copilot models still create a mess. It’s often like trying to get a three-year-old to do simple tasks.](https://www.reddit.com/r/Futurology/comments/1ivetnk/comment/me68ujj/)
+
+- [Well, using a calculator has killed the need to do most math and AI will kill the need to think.](https://www.reddit.com/r/Futurology/comments/1ivetnk/comment/me7sy6z/)
+
+- [On the LLM: It’s too positive. I don’t always want it to follow my ideas and I don’t want to hear how much my feedback is appreciated. Act like a machine.](https://news.ycombinator.com/item?id=44817015)
diff --git a/quotes3.md b/quotes3.md
new file mode 100644
index 0000000..6444e47
--- /dev/null
+++ b/quotes3.md
@@ -0,0 +1,8 @@
+- [“LLMs just spew words. It just so happens that human beings can decode them into something related, useful, and meaningful surprisingly often.”](https://news.ycombinator.com/item?id=45061706)
+- [“Spicy autocomplete is still spicy autocomplete.”](https://news.ycombinator.com/item?id=44208996)
+- [“The thing is ‘spicy’ or ‘glorified’ autocomplete are not actually bad labels; they are autocomplete machines that are very good up to the point of convincing people that they think.”](https://news.ycombinator.com/item?id=44213190)
+- [“It’s a human language calculator. You’re imparting magical qualities of general understanding to regression-based function approximation. They ‘fit’ the data.”](https://news.ycombinator.com/item?id=36032576)
+- [“LLM is just regurgitating stuff as a principle.”](https://news.ycombinator.com/item?id=41901696)
+- [“Those people—whether they can write an essay or not, whether they can use the word ‘consciousness’ or not—are still fundamentally alive because we share a grounded, lived, multi-sensory, social reality. And ChatGPT does not, not in the same way. Anything that it expresses now is only a mimicry of that.”](https://news.ycombinator.com/item?id=38286688)
+- [“LM avoids marginality and originality altogether by its design… If the problem was to design the most average programming language with no purpose, no market niche, and no technological context – then GPT-4 is clearly a winner.”](https://news.ycombinator.com/item?id=35178491)
+- [“LLMs develop no understanding of truth or lie. They just have a statistical model of what words go with what other words, and are plucking the next word based on that…”](https://news.ycombinator.com/item?id=34808829)