diff --git a/README.md b/README.md index 706ef05..39ba7bd 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Cookie: [...] _twitter_sess=BAhD[...]; auth_token=24fa[...] After you've received your twitter archive and edited a request-header file for a current session (as explained above), we can call our script: ``` -de-x.py tweets.js request-headers.txt +de-x.py tweets.js request-headers.txt 14 ``` ## Background @@ -45,3 +45,7 @@ Twitter's `DeleteTweet` API is not restricted, and can be called without registe There is no *One Click Delete Everything* tool available and it never will. This is due to Twitter's massive restrictions on using their APIs to control your own data. Of course, they would like to keep your data. Forever. However, If you don't want your data being archived at Twitter until global heat finally also kills all machines on planet earth, you should spend some time and effort to delete them - free of charge. Maybe it is possible to build a *One Click Delete Everything* tool using this approach, and maybe it is even user-friendly. I know, the one above is not user friendly, but hopefully this readme is, and hopefully it enables your daughter/neighbor/friend to assist with deleting stuff from the Internet that you don't want to see there anymore. In my opinion, everyone should have the right and the opportunity to delete their own content from the Internet without problems, without barriers and without paying money; regardless of their origin. +## Issues + +* might need to copy user-agent header field, too +* might need to get header fields from non-vpn connection so the used ip is the same? 
diff --git a/de-x.py b/de-x.py index 821e2db..fd47772 100644 --- a/de-x.py +++ b/de-x.py @@ -6,10 +6,15 @@ # # Please see README.md for more information ## - +#%% import sys import json import requests +from datetime import datetime, timedelta, UTC + +format_string = "%a %b %d %H:%M:%S +0000 %Y" +min_age_days = 14 +current_time = datetime.now(UTC) def get_tweet_ids(json_data): @@ -17,7 +22,11 @@ def get_tweet_ids(json_data): data = json.loads(json_data) for d in data: - result.append(d['tweet']['id_str']) + dt = datetime.strptime(d['tweet']['created_at'], format_string).replace(tzinfo=UTC) + age = current_time - dt + #print("dt=%s, age=%d days" % (dt, age.days)) + if age.days >= min_age_days: + result.append(d['tweet']['id_str']) return result @@ -41,11 +50,14 @@ def parse_req_headers(request_file): return sess def main(ac, av): + global min_age_days - if(ac != 3): - print(f"[!] usage: {av[0]} <tweets.js> <request-headers.txt>") + if(ac != 4): + print(f"[!] usage: {av[0]} <tweets.js> <request-headers.txt> <min-age-days>") return + min_age_days = int(av[3]) + f = open(av[1], encoding='UTF-8') raw = f.read() f.close() @@ -64,7 +76,7 @@ def main(ac, av): def delete_tweet(session, tweet_id): print(f"[*] delete tweet-id {tweet_id}") - delete_url = "https://twitter.com/i/api/graphql/VaenaVgh5q5ih7kvyVjgtg/DeleteTweet" + delete_url = "https://x.com/i/api/graphql/VaenaVgh5q5ih7kvyVjgtg/DeleteTweet" data = {"variables":{"tweet_id":tweet_id,"dark_request":False},"queryId":"VaenaVgh5q5ih7kvyVjgtg"} # set or re-set correct content-type header @@ -75,7 +87,10 @@ def delete_tweet(session, tweet_id): return - -if __name__ == '__main__': - +# detect VS Code or Jupyter environment - to allow running interactively: +if sys.argv[0].endswith('\\ipykernel_launcher.py'): + main(4, ['de-x.py', 'tweets.json', 'auth.txt', 14]) +elif __name__ == '__main__': main(len(sys.argv), sys.argv) + + \ No newline at end of file