main
oscar 11 months ago
parent 72bd5f9ba7
commit 9516890416

164
.gitignore vendored

@@ -1,160 +1,4 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Content
storysaver/
facebook/
media/

@@ -0,0 +1,17 @@
you have not failed enough
you care what other people think
you think you're smarter than everyone
you lack curiosity
you don't ask enough questions
you can't handle the truth
you don't see opportunities
resell to the people you already sold
staff either save you time or make you money
sell on people's weaknesses, insecurities and addictions
make people think they NEED your product
any business can be started with no money
business is money IN and not money OUT
take money, not make money
use whatever you've got
act with speed

99
_todo

@@ -0,0 +1,99 @@
fix recorder
recover missing streams
re-encode all videos to 10 seconds (see the ffmpeg sketch after this list)
scan last dood videos with missing download_link
delete all "deleted" self-hosted videosdump all useless self-hosted videos to mix/dood
fix camsrip crawler
move camsrip to new server
finish converting download_link faster
check failed mixdrop uploads
add cache for .ts files bunny/nvme
manage all domains
pay for onlycats domain
onlyfans crawler
telegram crawler
optimize instagram crawler
do ethernet cables
get access to xn
paint light switches/phone case/pc cases
microscope shorts
fix / sell scooter
paperless ngx
do hand scan
go to psychiatrist
do general checkup on body
fix and brush teeth
SFP and NTP
phishing ig
xss tate shop
finish and improve opsec
delete internet t
clean cry
warm up pay
install wasabi
install / try gaming linux
finish atrazat on
set up nas
dump last stories
photoshop originals
finish ab recoverer/cleaner
fix controller
fix hdd 100% load on video server
replace exoclick
fake comments bot
advanced tags/streamer data bot
self host all thumbs with bunny
reupload all dmca'd videos with new ids
generate shorts
use user's tokens to record private shows
create alert system
set up streaming server
minimize amount of scripts i need
normalize database
load balancers for web server
set up recu downloader
handle premium expired / purchases and upgrades
create bunny-like database and api for videos
save file sizes for videos
add payment options like paypal, usdt and more
re-generate thumbs for all videos self-hosted
download all mixdrop/dood/xpo videos
add streamate and cherrytv to recorder and website
delete stripchat dupes
delete "fav" dupes
blacklist ruta and other dmca agencies' crawlers
send emails to potential premiums
fix streamers db having 2 queries with and without gender
create storage manager for recorder
visualize nginx logs to track dmca bots
append all cutoff streams
add ssh keys
frontend:
add forums
add width sections for video player
coins/credit system (basically affiliate)
enable user uploaded content
performer accounts
advanced search system
affiliate system - optimize and create a panel where i can easily manage all
sort by dmca and most popular on /profile
change comments, follow and save to js
add payment options
optimize history/following
create contests and affiliates for premium
"copy" saved videos
keep views uncached on main page
add heatmap for player
fix missing animated thumbs in saved page
fix duplicates in saved videos page
add ip logging for security
require phone numbers for logging in?
add recu affiliate?
fix history dupes
try node.js to get the mp4 url from mixdrop
add profile pictures in search
add collections
mark premium videos
add credit card payment with skrill or others
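One item above, "re-encode all videos to 10 seconds", is concrete enough to sketch. The snippet below is only an illustration of that item, not the project's actual pipeline; the folder names and codec settings are assumptions.

# rough sketch for "re-encode all videos to 10 seconds"; paths and codecs are assumptions
import pathlib
import subprocess

SRC = pathlib.Path("videos")       # hypothetical input folder
DST = pathlib.Path("videos_10s")   # hypothetical output folder
DST.mkdir(exist_ok=True)

for video in SRC.glob("*.mp4"):
    subprocess.run([
        "ffmpeg", "-y", "-i", str(video),
        "-t", "10",                               # keep only the first 10 seconds
        "-c:v", "libx264", "-preset", "veryfast", # re-encode video
        "-c:a", "aac",                            # re-encode audio
        str(DST / video.name),
    ], check=True)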

@@ -0,0 +1,62 @@
import os
import tarfile
from datetime import datetime
import sys  # Import sys for command line arguments

from BunnyCDN.Storage import Storage

def is_hidden(path):
    """
    Check if the given path is a hidden folder or file.
    """
    return path.startswith('.') or '/.' in path

def should_exclude(path, excluded_items):
    """
    Check if the given path should be excluded.
    """
    if is_hidden(path):
        return True
    for item in excluded_items:
        if path.startswith(item):
            return True
    return False

def backup(folder_path, excluded_folders=[], excluded_files=[]):
    """
    Create a compressed backup of the specified folder, excluding specified items and hidden folders.
    """
    timestamp = int(datetime.timestamp(datetime.now()))
    backup_file = os.path.join(folder_path, f'backup-{timestamp}.tar.gz')
    with tarfile.open(backup_file, "w:gz") as tar:
        for root, dirs, file_names in os.walk(folder_path):
            if should_exclude(root, excluded_folders):
                continue
            for file_name in file_names:
                file_path = os.path.join(root, file_name)
                if file_path == backup_file:
                    # skip the archive currently being written so it is not added to itself
                    continue
                if should_exclude(file_path, excluded_files):
                    continue
                print("Adding %s" % file_path)
                tar.add(file_path, arcname=os.path.relpath(file_path, start=folder_path))
    return backup_file

if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python script.py <folder_path>")
        sys.exit(1)

    folder_path = sys.argv[1]
    if not os.path.isdir(folder_path):
        print(f"Error: The folder '{folder_path}' does not exist.")
        sys.exit(1)

    backup_file = backup(folder_path)

    obj_storage = Storage('99f4c72b-2674-4e6a-a1825c269cc0-b959-48a1', 'ab-backups')
    obj_storage.PutFile(backup_file, f'backups/{os.path.basename(backup_file)}')

    print("Backup and upload successful.")

@@ -42,19 +42,13 @@ def upload_file(filepath, username, media_type='image', post_type='story', times
duration = funcs.get_video_duration(filepath) if media_type == 'video' else 0
if "FB_IMG" in filename:
media_id = filename.split("_")[2].split(".")[0]
else:
media_id = uuid.uuid4().hex
if "FB_IMG" in filename: media_id = filename.split("_")[2].split(".")[0]
else: media_id = uuid.uuid4().hex
dirtype = funcs.determine_post_type(filepath, media_type)
server_path = os.path.join('media', dirtype, username, f'{media_id}{file_extension}')
try:
obj_storage.PutFile(filepath, server_path)
except Exception as e:
print(f"Failed to upload {filepath} to storage: {e}")
return False
obj_storage.PutFile(filepath, server_path)
file_url = f"https://storysave.b-cdn.net/{server_path}"

@@ -5,25 +5,20 @@ from PIL import Image
def dump_instagram(folder_path):
for filename in os.listdir(folder_path):
parts = filename.split('_')
parts = filename.split('~')
if len(parts) < 4:
continue
try:
username = '_'.join(parts[:-3])
timestamp = int(parts[-3])
media_id = int(parts[-2])
user_id = int(parts[-1].split('.')[0])
except:
print(f"Invalid filename: {filename}")
os.rename(os.path.join(folder_path, filename), os.path.join(folder_path, 'sort', filename))
continue
username = parts[0]
timestamp = parts[1]
media_id = parts[2]
user_id = parts[3].split('_')[-1].split('.')[0]
filepath = os.path.join(folder_path, filename)
UploadMedia(username=username, filepath=filepath, media_id=media_id, timestamp=timestamp, user_id=user_id)
def UploadMedia(filepath, username, media_id=None, timestamp=None, user_id=None):
def UploadMedia(filepath, username, media_id, timestamp = None, user_id = None):
filename = os.path.basename(filepath)
file_extension = os.path.splitext(filename)[1].lower()
@@ -72,6 +67,9 @@ if __name__ == '__main__':
newCursor.execute("SELECT media_id FROM media WHERE platform='instagram' AND media_id IS NOT NULL")
existing_files = [image[0] for image in newCursor.fetchall()]
filePath = 'storysaver/3385905371606651364.jpg'
UploadMedia(filepath=filePath, username='unknown', media_id=3385905371606651364)
dump_instagram('storysaver/')
print("Processing completed.")

@ -40,8 +40,8 @@ def upload_file(filepath, username):
os.remove(filepath)
return False
query = "INSERT IGNORE INTO media (username, media_type, media_url, width, height, platform, hash, filename, duration) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
values = (username, media_type, file_url, width, height, 'tiktok', file_hash, filename, duration)
query = "INSERT IGNORE INTO media (username, media_type, media_url, width, height, platform, hash, filename, duration, media_id) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
values = (username, media_type, file_url, width, height, 'tiktok', file_hash, filename, duration, media_id)
newCursor.execute(query, values)
newDB.commit()

@@ -0,0 +1,10 @@
ChallengeResolve: Unknown step_name "submit_phone" for "olivercury" in challenge resolver: {'step_name': 'submit_phone', 'step_data': {'phone_number': '+972522618221', 'show_whatsapp_otp_choice': True, 'whatsapp': False}, 'flow_render_type': 3, 'bloks_action': 'com.instagram.challenge.navigation.take_challenge', 'cni': 18436897147040850, 'challenge_context': 'Af6pVKkiomiOMxWvLzouGukazqMMhFbzNERezSMhBU-dHrO_DNGfTJpUPp8-di6HHm8WfAfL6_PQaLkV6sOkb6CC68ugfQtLMd3OgMVasZkOI5O6YdnoqMtBzNBGd944VtUNEEkl9bNVM5yQbfMskCuKTUf7AQOIYD2zEuvd8wC-AUBPziP105a1xq3GbaSeyJ9QnEJHHWgpFenBURUNbdLvQ9lzs5j62zCxo_0fe4Fw', 'challenge_type_enum_str': 'SMS', 'status': 'ok'}
requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://www.instagram.com/graphql/query/?variables=%7B%22user_id%22%3A%226208321762%22%2C%22include_reel%22%3Atrue%7D&query_hash=ad99dd9d3646cc3c0dda65debcd266a7
During handling of the above exception, another exception occurred:
instagrapi.exceptions.ClientUnauthorizedError: 401 Client Error: Unauthorized for url: https://www.instagram.com/graphql/query/?variables=%7B%22user_id%22%3A%226208321762%22%2C%22include_reel%22%3Atrue%7D&query_hash=ad99dd9d3646cc3c0dda65debcd266a7
During handling of the above exception, another exception occurred:
requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://i.instagram.com/api/v1/users/6208321762/info/
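These errors come from instagrapi sessions going stale (the 401s) or hitting a challenge flow the resolver does not know ("submit_phone"). As a hedged sketch, not the crawler's actual handler, one option is to re-login once on a 401 and skip accounts that raise a challenge; USERNAME and PASSWORD are placeholders:

# sketch only: retry a user_info call after re-login on 401, skip on challenge
import time
from instagrapi.exceptions import ChallengeRequired, ClientUnauthorizedError

USERNAME, PASSWORD = "login", "password"  # placeholders, not real credentials

def fetch_user_info(client, user_id, retries=2):
    for attempt in range(retries):
        try:
            return client.user_info(user_id)
        except ClientUnauthorizedError:
            time.sleep(5)
            client.login(USERNAME, PASSWORD)  # session likely expired; log in again
        except ChallengeRequired:
            return None                       # e.g. "submit_phone"; needs manual handling
    return None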

@@ -0,0 +1,82 @@
from BunnyCDN.Storage import Storage
from datetime import datetime
import os, config, funcs
from PIL import Image

def dump_instagram(folder_path):
    for filename in os.listdir(folder_path):
        parts = filename.split('_')
        try:
            username = '_'.join(parts[:-2])  # Join all except last two
            timestamp = int(parts[-2])  # Second last is timestamp
            user_id = int(parts[-1].split('.')[0])  # Last part before extension is user_id
        except Exception as e:
            print(f"Invalid filename: {filename}. Error: {e}")
            continue

        filepath = os.path.join(folder_path, filename)
        mediatype = funcs.get_media_type(filename)
        post_type = funcs.determine_post_type(filepath, mediatype)

        UploadMedia(username=username, media_type=mediatype, filepath=filepath, post_type=post_type, timestamp=timestamp, user_id=user_id)

def UploadMedia(filepath, username, media_type='image', post_type='story', timestamp=None, user_id=None):
    if 'tero' in username:
        pass

    filename = os.path.basename(filepath)
    file_extension = os.path.splitext(filename)[1].lower()
    file_hash = funcs.calculate_file_hash(filepath)
    duration = funcs.get_video_duration(filepath) if media_type == 'video' else 0
    post_date = datetime.fromtimestamp(int(timestamp)) if timestamp else datetime.now()

    dirtype = funcs.determine_post_type(filepath, media_type)
    server_path = f'media/{dirtype}/{username}/{file_hash}{file_extension}'
    file_url = f"https://storysave.b-cdn.net/{server_path}"

    if file_hash in existing_files:
        print('Duplicate file detected. Removing...')
        os.remove(filepath)
        return True

    obj_storage.PutFile(filepath, server_path)

    if media_type == 'image':
        with Image.open(filepath) as img:
            width, height = img.size
    else:
        width, height = funcs.get_video_dimensions(filepath)

    query = "INSERT IGNORE INTO media (username, media_type, media_url, width, height, post_type, date, user_id, hash, filename, duration) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    values = (username, media_type, file_url, width, height, post_type, post_date, user_id, file_hash, filename, duration)

    newCursor.execute(query, values)
    newDB.commit()

    print(f'[{newCursor.rowcount}] records updated. File {filename} uploaded to {file_url}')

    os.remove(filepath)

    return True

if __name__ == '__main__':
    print('Starting processing...')

    newDB, newCursor = config.gen_connection()
    obj_storage = Storage('345697f9-d9aa-4a6b-a5ec8bffc16d-ceaf-453e', 'storysave')

    newCursor.execute("SELECT hash FROM media WHERE platform='instagram' AND hash IS NOT NULL")
    existing_files = [image[0] for image in newCursor.fetchall()]

    dump_instagram('storysaver/missingdata/')

    print("Processing completed.")

@@ -0,0 +1,19 @@
import os, config, funcs

if __name__ == '__main__':
    print('Starting processing...')

    newDB, newCursor = config.gen_connection()

    newCursor.execute("SELECT hash FROM media WHERE platform='instagram' AND hash IS NOT NULL")
    existing_files = [image[0] for image in newCursor.fetchall()]

    files = os.listdir('storysaver/missingdata/')
    for file in files:
        filePath = os.path.join('storysaver/missingdata/', file)
        file_hash = funcs.calculate_file_hash(filePath)
        if file_hash in existing_files:
            print(f'Duplicate file detected. Removing {filePath}...')
            os.rename(filePath, f'storysaver/dupes/{file}')
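One assumption in the script above is that storysaver/dupes/ already exists; os.rename raises FileNotFoundError otherwise. A one-line guard, if that is not guaranteed:

os.makedirs('storysaver/dupes', exist_ok=True)  # ensure the dupes folder exists before renaming into it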

@@ -1,9 +1,6 @@
https://rule34.xxx/index.php?page=post&s=view&id=8829721
https://rule34.xxx/index.php?page=post&s=view&id=9416031
https://rule34.xxx/index.php?page=post&s=view&id=10105236
https://rule34.xxx/index.php?page=post&s=list&tags=dzooworks+animated
https://rule34.xxx/index.php?page=post&s=list&tags=sageofosiris+animated
https://rule34.xxx/index.php?page=post&s=list&tags=shirami_%28artist%29+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9885293
https://rule34.xxx/index.php?page=post&s=view&id=10034199
https://rule34.xxx/index.php?page=post&s=view&id=10102882
@@ -14,26 +11,14 @@ https://rule34.xxx/index.php?page=post&s=view&id=8805292
https://rule34.xxx/index.php?page=post&s=view&id=9279505
https://rule34.xxx/index.php?page=post&s=view&id=9443010
https://rule34.xxx/index.php?page=post&s=view&id=9609049
https://rule34.xxx/index.php?page=post&s=list&tags=ivan_e_recshun+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=chloeangelva+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=zmsfm+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=d.va+animated
https://rule34.xxx/index.php?page=post&s=list&tags=youngiesed
https://rule34.xxx/index.php?page=post&s=list&tags=lerico213+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9955496
https://rule34.xxx/index.php?page=post&s=list&tags=lerico213+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9745604
https://rule34.xxx/index.php?page=post&s=view&id=9669668
https://rule34.xxx/index.php?page=post&s=list&tags=speedosausage
https://rule34.xxx/index.php?page=post&s=view&id=9670073
https://rule34.xxx/index.php?page=post&s=list&tags=animated+cute
https://rule34.xxx/index.php?page=post&s=view&id=9900309
https://rule34.xxx/index.php?page=post&s=view&id=10114922
https://rule34.xxx/index.php?page=post&s=list&tags=realistic+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9900309
https://rule34.xxx/index.php?page=post&s=list&tags=checkpik+animated+
https://rule34.xxx/index.php?page=post&s=view&id=9530599
https://rule34.xxx/index.php?page=post&s=list&tags=pewposterous+animated+
https://rule34.xxx/index.php?page=post&s=view&id=7983487
https://rule34.xxx/index.php?page=post&s=view&id=9664965
https://rule34.xxx/index.php?page=post&s=view&id=10025400
@@ -41,12 +26,17 @@ https://rule34.xxx/index.php?page=post&s=view&id=4710252
https://rule34.xxx/index.php?page=post&s=view&id=8858439
https://rule34.xxx/index.php?page=post&s=view&id=9423465
https://rule34.xxx/index.php?page=post&s=list&tags=checkpik+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=pewposterous+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=realistic+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=speedosausage
https://rule34.xxx/index.php?page=post&s=list&tags=animated+cute
https://rule34.xxx/index.php?page=post&s=list&tags=lerico213+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=ivan_e_recshun+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=chloeangelva+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=zmsfm+animated+
https://rule34.xxx/index.php?page=post&s=list&tags=d.va+animated
https://rule34.xxx/index.php?page=post&s=list&tags=youngiesed
https://rule34.xxx/index.php?page=post&s=list&tags=dzooworks+animated
https://rule34.xxx/index.php?page=post&s=list&tags=sageofosiris+animated
https://rule34.xxx/index.php?page=post&s=list&tags=shirami_%28artist%29+animated+

@@ -7,14 +7,8 @@ from BunnyCDN.Storage import Storage
from instagrapi import Client
from PIL import Image
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
}
proxies = {
"http": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/",
"https": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/",
}
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"}
proxies={"http": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/","https": "http://yehyuxsl-rotate:4tl5bvrwkz5e@p.webshare.io:80/"}
def file_hash(filename, hash_algo="sha256"):
"""
@@ -79,39 +73,29 @@ def login(force=False):
def parse_media_data(media_item):
mediaTypes = {1: "image", 2: "video", 8: "album"}
mediaTypes = {1: 'image', 2: 'video', 8: 'album'}
try:
taken_at = media_item.taken_at
except:
taken_at = None
try:
post_type = media_item.product_type
except:
post_type = None
try:taken_at = media_item.taken_at
except:taken_at = None
try:post_type = media_item.product_type
except:post_type = None
mediaInfo = {
"taken_at": taken_at,
"post_type": post_type,
"media_type": mediaTypes[media_item.media_type],
}
mediaInfo = {'taken_at': taken_at, 'post_type' : post_type, 'media_type': mediaTypes[media_item.media_type]}
if media_item.media_type == 1: # Image
mediaInfo["media_id"] = int(media_item.pk)
mediaInfo["fileURL"] = media_item.thumbnail_url
mediaInfo["filename"] = f"{media_item.pk}.jpg"
mediaInfo['media_id'] = int(media_item.pk)
mediaInfo['fileURL'] = media_item.thumbnail_url
mediaInfo['filename'] = f"{media_item.pk}.jpg"
elif media_item.media_type == 2: # Video
mediaInfo["media_id"] = int(media_item.pk)
mediaInfo["fileURL"] = media_item.video_url
try:
mediaInfo["duration"] = media_item.video_duration
except:
mediaInfo["duration"] = 0
mediaInfo["filename"] = f"{media_item.pk}.mp4"
mediaInfo['media_id'] = int(media_item.pk)
mediaInfo['fileURL'] = media_item.video_url
try:mediaInfo['duration'] = media_item.video_duration
except:mediaInfo['duration'] = 0
mediaInfo['filename'] = f"{media_item.pk}.mp4"
else:
print(f"Unsupported media type with ID {media_item.pk}")
return None
return mediaInfo
@@ -175,19 +159,7 @@ def add_media_to_db(mediaInfo):
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
"""
data = (
user_id,
username,
date,
media_type,
post_type,
media_url,
duration,
width,
height,
media_id,
filehash,
)
data = (user_id, username, date, media_type, post_type, media_url, duration, width, height, media_id, filehash)
cursor.execute(query, data)
db.commit()
@@ -225,11 +197,20 @@ if __name__ == "__main__":
db, cursor = config.gen_connection()
cursor.execute(
"SELECT instagram_username, instagram_user_id, favorite FROM following ORDER BY favorite DESC, id DESC;"
)
cursor.execute("SELECT instagram_username, instagram_user_id, favorite FROM following ORDER BY id DESC;")
following = cursor.fetchall()
new_following = []
for user in following:
username, user_id, favorite = user
if bool(favorite):
new_following.insert(0, user)
else:
new_following.append(user)
following = new_following
cursor.execute("SELECT media_id FROM media WHERE media_id IS NOT NULL;")
existing_files = [media[0] for media in cursor.fetchall()]
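The loop above replaces the SQL ORDER BY favorite DESC with a Python-side reorder; note that insert(0, ...) also reverses the relative order of the favorited accounts. A stable sort keeps the original id DESC order within each group (sketch, same tuple layout assumed):

# favorites (truthy third column) first, original order preserved within each group
following = sorted(following, key=lambda row: not row[2])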
@@ -258,36 +239,29 @@ if __name__ == "__main__":
firstImport = True
user_id = client.user_id_from_username(username)
actionsTaken += 1
cursor.execute(
"UPDATE following SET instagram_user_id = %s WHERE instagram_username = %s;",
(user_id, username),
)
cursor.execute("UPDATE following SET instagram_user_id = %s WHERE instagram_username = %s;", (user_id, username))
db.commit()
print(f"Updated user ID for {username} to {user_id}")
#################### profile picture ####################
profilePath = os.path.join(
"media", "profile", username, "profile.jpg"
)
profilePath = os.path.join('media', 'profile', username, 'profile.jpg')
profileURL = client.user_info(user_id).profile_pic_url_hd
download_file(profileURL, profilePath)
fileHash = file_hash(profilePath)
serverPath = os.path.join(
os.path.dirname(profilePath), f"{fileHash}.jpg"
)
serverPath = os.path.join(os.path.dirname(profilePath), f"{fileHash}.jpg")
upload_to_storage(profilePath, serverPath)
mediaInfo = {
"username": username,
"user_id": user_id,
"media_id": None,
"media_type": "image",
"post_type": "profile",
"media_url": f"https://storysave.b-cdn.net/{serverPath}",
"duration": 0,
"hash": fileHash,
'username': username,
'user_id': user_id,
'media_id': None,
'media_type': 'image',
'post_type': 'profile',
'media_url': f"https://storysave.b-cdn.net/{serverPath}",
'duration': 0,
'hash': fileHash
}
process_media(mediaInfo, profilePath)
@@ -307,24 +281,14 @@ if __name__ == "__main__":
actionsTaken += 1
for highlight in highlights:
try:
highlight_items = client.highlight_info_v1(
highlight.pk
).items # API request
highlight_items = client.highlight_info_v1(highlight.pk).items # API request
actionsTaken += 1
except:
print(
f"Failed to get highlight items for {highlight.pk}"
)
print(f"Failed to get highlight items for {highlight.pk}")
time.sleep(5)
media_ids = [item.pk for item in highlight_items]
executor.submit(
insert_highlight_items,
media_ids,
highlight.pk,
highlight.title,
user_id,
)
executor.submit(insert_highlight_items, media_ids, highlight.pk, highlight.title, user_id)
stories.extend(highlight_items)
# process stories and highlight stories
@@ -342,12 +306,8 @@ if __name__ == "__main__":
mediaInfo["username"] = username
mediaInfo["post_type"] = "story"
if mediaInfo["fileURL"] and mediaInfo["filename"]:
filePath = os.path.join(
"media", "stories", username, mediaInfo["filename"]
)
mediaInfo["media_url"] = (
f"https://storysave.b-cdn.net/{filePath}"
)
filePath = os.path.join('media', 'stories', username, mediaInfo['filename'])
mediaInfo['media_url'] = f"https://storysave.b-cdn.net/{filePath}"
download_file(mediaInfo["fileURL"], filePath)
process_media(mediaInfo, filePath)
@@ -362,7 +322,7 @@ if __name__ == "__main__":
#################### posts ####################
print("Checking: Posts")
medias = client.user_medias(user_id, 36) # API request
medias = client.user_medias(user_id, 36) # API request
actionsTaken += 1
posts = []
@@ -383,13 +343,9 @@ if __name__ == "__main__":
mediaInfo["user_id"] = user_id
mediaInfo["username"] = username
mediaInfo["post_type"] = "post"
if mediaInfo["fileURL"] and mediaInfo["filename"]:
filePath = os.path.join(
"media", "posts", username, mediaInfo["filename"]
)
mediaInfo["media_url"] = (
f"https://storysave.b-cdn.net/{filePath}"
)
if mediaInfo['fileURL'] and mediaInfo['filename']:
filePath = os.path.join('media', 'posts', username, mediaInfo['filename'])
mediaInfo['media_url'] = f"https://storysave.b-cdn.net/{filePath}"
download_file(mediaInfo["fileURL"], filePath)
process_media(mediaInfo, filePath)
