Initial commit

main
Your Name 14 hours ago
commit 1f2cfe86d8

2
.gitattributes vendored

@ -0,0 +1,2 @@
# Auto detect text files and perform LF normalization
* text=auto

@ -0,0 +1,15 @@
https://webcamrips.to/
https://livecamrips.tv/
https://www.showcamrips.com/
Same site but home page is unique https://8teen.cam/
https://hdstream.ing/
https://livecamrips1.com/
https://allmy.cam/
https://www.lovecamporn.com/
https://camsmut.com/
https://cumcams.cc/
https://striphub.cam/
https://chaturflix.cam/

@ -0,0 +1,316 @@
<!DOCTYPE html>
<html>
<head>
<meta charSet="utf-8"/>
<meta name="viewport" content="width=device-width"/>
<title>MiraVelle Stripchat recording at MiraVelle@2025-10-21-05-07.mp4</title>
<meta name="title" content="MiraVelle Stripchat recording at MiraVelle@2025-10-21-05-07.mp4"/>
<meta name="description" content="Cam model MiraVelle show recording on MiraVelle@2025-10-21-05-07.mp4. StripHub - Watch your favourite hot models live reordings for free."/>
<meta name="keywords" content="MiraVelle Cam recordings,MiraVelle Cam show, MiraVelle Cam recordings, MiraVelle live shows,MiraVelle Cam recordings, MiraVelle stripchat show, MiraVelle stripchat recordings, MiraVelle stripchat shows, Cam, camgirl, Cam performer, Stripchat Recordings, Xhamsterlive Recordings, hot model, adult cam, adult performer, camgirls, webcam tube, free cam videos, XXX tube, camwhores, cam show, camwhores alternative, Recurbate alternative"/>
<meta property="og:type" content="website"/>
<meta property="og:site_name" content="recordjerk"/>
<meta property="og:url" content="https://recordjerk.cam/play/68f731ea62f66877cc80e54f"/>
<meta property="og:title" content="MiraVelle Stripchat recording at MiraVelle@2025-10-21-05-07.mp4"/>
<meta property="og:description" content="Cam model MiraVelle show recording on MiraVelle@2025-10-21-05-07.mp4. StripHub - Watch your favourite hot models live reordings for free."/>
<meta property="og:image" content="https://static.recordjerk.cam/media/images/MiraVelle/fb0aaab4-0078-4152-9800-df21fe7e1366"/>
<meta property="og:image:width" content="1200"/>
<meta property="og:image:height" content="630"/>
<meta property="og:locale" content="en_US"/>
<meta property="article:publisher" content=""/>
<meta name="twitter:card" content="summary_large_image"/>
<meta name="twitter:site" content="@Chaturflix"/>
<meta name="twitter:title" content="MiraVelle Stripchat recording at MiraVelle@2025-10-21-05-07.mp4"/>
<meta name="twitter:description" content="Cam model MiraVelle show recording on MiraVelle@2025-10-21-05-07.mp4. StripHub - Watch your favourite hot models live reordings for free."/>
<meta name="twitter:image" content="https://static.recordjerk.cam/media/images/MiraVelle/fb0aaab4-0078-4152-9800-df21fe7e1366"/>
<meta name="twitter:image:alt" content="MiraVelle Stripchat recording at MiraVelle@2025-10-21-05-07.mp4"/>
<meta name="robots" content="index, follow"/>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
<meta http-equiv="content-language" content="en"/>
<link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png"/>
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png"/>
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png"/>
<link rel="manifest" href="/site.webmanifest"/>
<script async="" src="https://www.googletagmanager.com/gtag/js?id=G-FL2M7S43YZ"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag() {
dataLayer.push(arguments);
}
gtag('js', new Date());
gtag('config', 'G-FL2M7S43YZ');
</script>
<meta name="next-head-count" content="31"/>
<link rel="preload" href="/_next/static/css/e5db6c01610a1415.css" as="style"/>
<link rel="stylesheet" href="/_next/static/css/e5db6c01610a1415.css" data-n-g=""/>
<noscript data-n-css=""></noscript>
<script defer="" nomodule="" src="/_next/static/chunks/polyfills-c67a75d1b6f99dc8.js"></script>
<script src="/_next/static/chunks/webpack-be20342c33773a33.js" defer=""></script>
<script src="/_next/static/chunks/framework-0c7baedefba6b077.js" defer=""></script>
<script src="/_next/static/chunks/main-7af7d5359a6145de.js" defer=""></script>
<script src="/_next/static/chunks/pages/_app-744bafa374e79f96.js" defer=""></script>
<script src="/_next/static/chunks/75fc9c18-165f0fc0c463a090.js" defer=""></script>
<script src="/_next/static/chunks/pages/play/%5Bid%5D-38819b96a582d9ce.js" defer=""></script>
<script src="/_next/static/-vZbDXryM1mmBj6cDtzb_/_buildManifest.js" defer=""></script>
<script src="/_next/static/-vZbDXryM1mmBj6cDtzb_/_ssgManifest.js" defer=""></script>
</head>
<body>
<div id="__next">
<script>
!function() {
try {
var d = document.documentElement
, c = d.classList;
c.remove('light', 'dark');
var e = localStorage.getItem('theme');
if (e) {
c.add(e || '')
} else {
c.add('dark');
}
if (e === 'light' || e === 'dark' || !e)
d.style.colorScheme = e || 'dark'
} catch (t) {}
}();
</script>
<main class="min-h-screen flex flex-col ">
<nav class="bg-slate-100 dark:bg-dark-200">
<div class="w-full font-semibold flex justify-between items-center vf-container py-3 ">
<div class="flex items-center gap-5 pr-5 flex-auto lg:flex-none">
<a href="/" class=" font-bold text-3xl">
<span class="text-primary">Strip</span>
<span class="text-white">Hub</span>
</a>
</div>
<div class=" w-1/3">
<div class="">
<form class="relative w-full font-normal text-sm flex-auto">
<input class="peer border dark:bg-dark-100 bg-slate-200 px-3.5 py-2.5 border-primary border-opacity-0 rounded transition-all duration-200 focus:border-opacity-100 outline-none w-full placeholder:text-gray-900 dark:placeholder:text-gray-300 placeholder:tracking-wider placeholder:opacity-60" placeholder="Search Performers/Tags" value=""/>
<div class="peer-placeholder-shown:hidden peer-placeholder-shown:text-current absolute right-2.5 top-0 flex justify-center items-center h-full gap-x-2">
<button type="submit" class=" flex justify-center items-center ">
<span class="text-xs text-gray-400 border-[1px] border-slate-300 px-1 rounded-sm">view all</span>
</button>
<button type="reset" class=" flex justify-center items-center ">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor" aria-hidden="true" class="w-6 h-6">
<path fill-rule="evenodd" d="M5.47 5.47a.75.75 0 011.06 0L12 10.94l5.47-5.47a.75.75 0 111.06 1.06L13.06 12l5.47 5.47a.75.75 0 11-1.06 1.06L12 13.06l-5.47 5.47a.75.75 0 01-1.06-1.06L10.94 12 5.47 6.53a.75.75 0 010-1.06z" clip-rule="evenodd"></path>
</svg>
</button>
</div>
<span class="peer-placeholder-shown:flex hidden absolute right-2.5 top-0 justify-center items-center h-full peer-placeholder-shown:text-current ">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor" aria-hidden="true" class="w-5 h-5">
<path fill-rule="evenodd" d="M10.5 3.75a6.75 6.75 0 100 13.5 6.75 6.75 0 000-13.5zM2.25 10.5a8.25 8.25 0 1114.59 5.28l4.69 4.69a.75.75 0 11-1.06 1.06l-4.69-4.69A8.25 8.25 0 012.25 10.5z" clip-rule="evenodd"></path>
</svg>
</span>
</form>
</div>
</div>
<div class="">
<div class="hidden text-sm lg:flex font-light items-center gap-x-5">
<a href="https://theporndude.com/" target="_blank" class="mr-10 font-medium text-orange-400">ThePornDude</a>
<button>
<span slot="dark" class="">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor" aria-hidden="true" class="w-4 hover:text-primary transition-all duration-300 ease-in-out">
<path d="M12 2.25a.75.75 0 01.75.75v2.25a.75.75 0 01-1.5 0V3a.75.75 0 01.75-.75zM7.5 12a4.5 4.5 0 119 0 4.5 4.5 0 01-9 0zM18.894 6.166a.75.75 0 00-1.06-1.06l-1.591 1.59a.75.75 0 101.06 1.061l1.591-1.59zM21.75 12a.75.75 0 01-.75.75h-2.25a.75.75 0 010-1.5H21a.75.75 0 01.75.75zM17.834 18.894a.75.75 0 001.06-1.06l-1.59-1.591a.75.75 0 10-1.061 1.06l1.59 1.591zM12 18a.75.75 0 01.75.75V21a.75.75 0 01-1.5 0v-2.25A.75.75 0 0112 18zM7.758 17.303a.75.75 0 00-1.061-1.06l-1.591 1.59a.75.75 0 001.06 1.061l1.591-1.59zM6 12a.75.75 0 01-.75.75H3a.75.75 0 010-1.5h2.25A.75.75 0 016 12zM6.697 7.757a.75.75 0 001.06-1.06l-1.59-1.591a.75.75 0 00-1.061 1.06l1.59 1.591z"></path>
</svg>
</span>
</button>
<a href="/" class="nav-link">Home</a>
<a href="/performer" class="nav-link">Performers</a>
<a href="https://rec.recordjerk.cam/" class="add-to-record-button">
<div class="border-pink-500 group border-2 text-primary tracking-wider font-semibold rounded-full gap-x-1.5 flex items-center">
<div class="relative w-5 h-5 m-1">
<div class="absolute top-0 left-0 w-full h-full bg-primary animate-pulse rounded-full"></div>
<div class="absolute top-0 left-0 w-full h-full bg-primary animate-ping rounded-full"></div>
</div>
<span class=" pr-3.5 flex-auto text-center">Add model to Recording</span>
</div>
</a>
</div>
<button class="block lg:hidden border p-2 py-1 rounded">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor" aria-hidden="true" class="w-6">
<path fill-rule="evenodd" d="M3 6.75A.75.75 0 013.75 6h16.5a.75.75 0 010 1.5H3.75A.75.75 0 013 6.75zM3 12a.75.75 0 01.75-.75h16.5a.75.75 0 010 1.5H3.75A.75.75 0 013 12zm0 5.25a.75.75 0 01.75-.75h16.5a.75.75 0 010 1.5H3.75a.75.75 0 01-.75-.75z" clip-rule="evenodd"></path>
</svg>
</button>
</div>
</div>
<div class="sub-menu lg:px-16 bg-gray-1000 lg:hidden dark:text-gray-200 text-gray-800 hidden">
<div class="border-t border-gray-400 gap-2 dark:border-dark-100 flex justify-end px-8 py-4 items-center w-full">
<span>Theme</span>
<button>
<span slot="dark" class="">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="currentColor" aria-hidden="true" class="w-4 hover:text-primary transition-all duration-300 ease-in-out">
<path d="M12 2.25a.75.75 0 01.75.75v2.25a.75.75 0 01-1.5 0V3a.75.75 0 01.75-.75zM7.5 12a4.5 4.5 0 119 0 4.5 4.5 0 01-9 0zM18.894 6.166a.75.75 0 00-1.06-1.06l-1.591 1.59a.75.75 0 101.06 1.061l1.591-1.59zM21.75 12a.75.75 0 01-.75.75h-2.25a.75.75 0 010-1.5H21a.75.75 0 01.75.75zM17.834 18.894a.75.75 0 001.06-1.06l-1.59-1.591a.75.75 0 10-1.061 1.06l1.59 1.591zM12 18a.75.75 0 01.75.75V21a.75.75 0 01-1.5 0v-2.25A.75.75 0 0112 18zM7.758 17.303a.75.75 0 00-1.061-1.06l-1.591 1.59a.75.75 0 001.06 1.061l1.591-1.59zM6 12a.75.75 0 01-.75.75H3a.75.75 0 010-1.5h2.25A.75.75 0 016 12zM6.697 7.757a.75.75 0 001.06-1.06l-1.59-1.591a.75.75 0 00-1.061 1.06l1.59 1.591z"></path>
</svg>
</span>
</button>
</div>
<a href="/">
<div class="link">Home</div>
</a>
<a href="/performer">
<div class="link">Performers</div>
</a>
<a href="/dmca">
<div class="link">DMCA</div>
</a>
<a href="https://theporndude.com/" target="_blank" class="mr-10 font-medium text-pink-400 link">ThePornDude</a>
</div>
</nav>
<div class="flex-auto">
<div class="flex vf-container">
<div class="w-full lg:w-9/12 mt-5 ">
<div class="lg:flex justify-center hidden "></div>
<div class="aspect-video w-full ">
<div class="w-full h-full">
<video id="fluid-player" class="w-full h-full"></video>
</div>
<iframe id="video-iframe" class="w-full h-full" style="display:none" allowfullscreen="" title="Video"></iframe>
</div>
<div class="flex justify-center w-full py-4">
<button disabled="" class="px-4 py-2 text-sm rounded-md bg-primary text-white hover:bg-primary disabled:bg-orange-200 disabled:text-gray-500 disabled:cursor-not-allowed">Skip Ad in 10s</button>
</div>
<div class="p-3 bg-gray-100 dark:bg-dark-100">
<div class="flex items-center justify-between py-2 ">
<div>
<a href="/performer/MiraVelle">
<h1 class="text-lg font-semibold block ">
<span class="text-primary hover:underline ">MiraVelle</span>
Cam recordings
</h1>
</a>
<span class="text-sm ">
<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="1855716a794e7d74747d582a282a2d352928352a2935282d35282f3675682c">[email &#160;protected]</a>
</span>
</div>
<a href="https://mxdrop.to/f/z1jk3p0eckp6rd" target="_blank" rel="noreferrer">
<button class="px-4 py-2 text-sm text-white bg-primary hover:bg-orange-500 rounded-lg flex items-center gap-1">
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="w-4 ">
<path stroke-linecap="round" stroke-linejoin="round" d="M3 16.5v2.25A2.25 2.25 0 005.25 21h13.5A2.25 2.25 0 0021 18.75V16.5M16.5 12L12 16.5m0 0L7.5 12m4.5 4.5V3"></path>
</svg>
Download
</button>
</a>
</div>
<div class="video-tags mt-3 mb-2"></div>
</div>
</div>
<div class="hidden md:block lg:w-3/12 p-5 pt-10"></div>
</div>
<div class="vf-container my-10">
<h2 class="font-semibold pb-5">
More records from
<!-- -->
<a class="hover:text-primary" href="/performer/MiraVelle">MiraVelle</a>
</h2>
<div class=" grid grid-cols-1 lg:grid-cols-4 gap-4">
<div class="group video-item">
<a href="/play/68f5ba8c62f66877cc80af43">
<div class=" relative video-card animate-pulse duration-200 bg-gray-400 bg-opacity-60">
<span class="absolute bottom-1 rounded right-1 text-xs group-hover:bg-red-400 group-hover:text-white bg-white bg-opacity-60 text-black px-1 pb-1">01:00:06</span>
</div>
</a>
<a href="/performer/MiraVelle" class=" block pt-1 cursor-pointer font-semibold 2xl:text-lg group-hover:text-primary ">MiraVelle</a>
<span class="block text-xs">
<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="034e6a716255666f6f6643313331362e32332e31332e33322e37352d6e7337">[email &#160;protected]</a>
</span>
</div>
<div class="group video-item">
<a href="/play/68f6c46b62f66877cc80d07d">
<div class=" relative video-card animate-pulse duration-200 bg-gray-400 bg-opacity-60">
<span class="absolute bottom-1 rounded right-1 text-xs group-hover:bg-red-400 group-hover:text-white bg-white bg-opacity-60 text-black px-1 pb-1">29:47</span>
</div>
</a>
<a href="/performer/MiraVelle" class=" block pt-1 cursor-pointer font-semibold 2xl:text-lg group-hover:text-primary ">MiraVelle</a>
<span class="block text-xs">
<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="307d59425166555c5c5570020002051d01001d02001d02021d02091e5d4004">[email &#160;protected]</a>
</span>
</div>
<div class="group video-item">
<a href="/play/68f70f3362f66877cc80dc21">
<div class=" relative video-card animate-pulse duration-200 bg-gray-400 bg-opacity-60">
<span class="absolute bottom-1 rounded right-1 text-xs group-hover:bg-red-400 group-hover:text-white bg-white bg-opacity-60 text-black px-1 pb-1">19:54</span>
</div>
</a>
<a href="/performer/MiraVelle" class=" block pt-1 cursor-pointer font-semibold 2xl:text-lg group-hover:text-primary ">MiraVelle</a>
<span class="block text-xs">
<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="125f7b607344777e7e7752202220273f23223f20233f22213f26273c7f6226">[email &#160;protected]</a>
</span>
</div>
<div class="group video-item">
<a href="/play/68f6b62362f66877cc80ceaf">
<div class=" relative video-card animate-pulse duration-200 bg-gray-400 bg-opacity-60">
<span class="absolute bottom-1 rounded right-1 text-xs group-hover:bg-red-400 group-hover:text-white bg-white bg-opacity-60 text-black px-1 pb-1">19:06</span>
</div>
</a>
<a href="/performer/MiraVelle" class=" block pt-1 cursor-pointer font-semibold 2xl:text-lg group-hover:text-primary ">MiraVelle</a>
<span class="block text-xs">
<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="bcf1d5ceddead9d0d0d9fc8e8c8e89918d8c918e8c918e8d91888592d1cc88">[email &#160;protected]</a>
</span>
</div>
<div class="group video-item">
<a href="/play/68f704b762f66877cc80da01">
<div class=" relative video-card animate-pulse duration-200 bg-gray-400 bg-opacity-60">
<span class="absolute bottom-1 rounded right-1 text-xs group-hover:bg-red-400 group-hover:text-white bg-white bg-opacity-60 text-black px-1 pb-1">39:04</span>
</div>
</a>
<a href="/performer/MiraVelle" class=" block pt-1 cursor-pointer font-semibold 2xl:text-lg group-hover:text-primary ">MiraVelle</a>
<span class="block text-xs">
<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="aee3c7dccff8cbc2c2cbee9c9e9c9b839f9e839c9f839e9f839b9880c3de9a">[email &#160;protected]</a>
</span>
</div>
<div class="group video-item">
<a href="/play/68f6d7e062f66877cc80d3ba">
<div class=" relative video-card animate-pulse duration-200 bg-gray-400 bg-opacity-60">
<span class="absolute bottom-1 rounded right-1 text-xs group-hover:bg-red-400 group-hover:text-white bg-white bg-opacity-60 text-black px-1 pb-1">43:47</span>
</div>
</a>
<a href="/performer/MiraVelle" class=" block pt-1 cursor-pointer font-semibold 2xl:text-lg group-hover:text-primary ">MiraVelle</a>
<span class="block text-xs">
<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="eea3879c8fb88b82828baedcdedcdbc3dfdec3dcdec3dcddc3dfdec0839eda">[email &#160;protected]</a>
</span>
</div>
<div class="group video-item">
<a href="/play/68f5ef3962f66877cc80bc4d">
<div class=" relative video-card animate-pulse duration-200 bg-gray-400 bg-opacity-60">
<span class="absolute bottom-1 rounded right-1 text-xs group-hover:bg-red-400 group-hover:text-white bg-white bg-opacity-60 text-black px-1 pb-1">20:06</span>
</div>
</a>
<a href="/performer/MiraVelle" class=" block pt-1 cursor-pointer font-semibold 2xl:text-lg group-hover:text-primary ">MiraVelle</a>
<span class="block text-xs">
<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="dd90b4afbc8bb8b1b1b89defedefe8f0ecedf0efedf0edeef0e8ecf3b0ade9">[email &#160;protected]</a>
</span>
</div>
<div class="group video-item">
<a href="/play/68f7261e62f66877cc80e233">
<div class=" relative video-card animate-pulse duration-200 bg-gray-400 bg-opacity-60">
<span class="absolute bottom-1 rounded right-1 text-xs group-hover:bg-red-400 group-hover:text-white bg-white bg-opacity-60 text-black px-1 pb-1">01:00:06</span>
</div>
</a>
<a href="/performer/MiraVelle" class=" block pt-1 cursor-pointer font-semibold 2xl:text-lg group-hover:text-primary ">MiraVelle</a>
<span class="block text-xs">
<a href="/cdn-cgi/l/email-protection" class="__cf_email__" data-cfemail="84c9edf6e5d2e1e8e8e1c4b6b4b6b1a9b5b4a9b6b5a9b4b0a9b4b2aae9f4b0">[email &#160;protected]</a>
</span>
</div>
</div>
</div>
<span class="opacity-0 hover:opacity-100 text-[10px] transition-all duraiton-300 pl-2">Generated At:
<!-- -->
10/21/2025, 7:17:21 AM</span>
</div>
<div class="flex flex-col justify-center items-center py-3 bg-slate-200 dark:bg-dark-200">
<span class="text-sm">
Powered by
<!-- -->
<a href="https://recordjerk.cam/" class="text-red-400 hover:underline">recordjerk.cam</a>
</span>
<div class="mt-1 text-xs underline flex items-center gap-x-2 ">
<a href="/contact">Contact</a>
<a href="/dmca">DMCA</a>
</div>
</div>
</main>
</div>
<script data-cfasync="false" src="/cdn-cgi/scripts/5c5dd728/cloudflare-static/email-decode.min.js"></script>
<script id="__NEXT_DATA__" type="application/json">
</body>
</html>

@ -0,0 +1,60 @@
import requests
from bs4 import BeautifulSoup
import time
def fetch_video_links(page_url, base_url="https://striphub.cam"):
    """Fetch one listing page and return absolute URLs for every /play/ link.

    Args:
        page_url: URL of the paginated listing page to scrape.
        base_url: Origin prepended to relative ``/play/`` hrefs. Previously a
            constant re-assigned inside the loop on every iteration; now a
            parameter with the old value as default (backward compatible).

    Returns:
        list[str]: Absolute ``/play/`` URLs found on the page. Empty list when
        the request fails — the error is printed, not raised.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
    try:
        response = requests.get(page_url, headers=headers, timeout=10)
        response.raise_for_status()
    except requests.RequestException as e:
        print(f"❌ Failed to fetch {page_url}: {e}")
        return []
    soup = BeautifulSoup(response.text, "html.parser")
    # Keep only anchors that point at a /play/ detail page, made absolute.
    video_links = [
        base_url + a_tag["href"]
        for a_tag in soup.find_all("a", href=True)
        if a_tag["href"].startswith("/play/")
    ]
    print(f"✅ Found {len(video_links)} videos on {page_url}")
    return video_links
def crawl_all_pages(base_url, total_pages, output_file="video_links.txt"):
    """Crawl listing pages 1..total_pages and save the unique /play/ links.

    Args:
        base_url: Site origin, e.g. ``"https://striphub.cam"``.
        total_pages: Number of ``/page/<n>`` listing pages to visit.
        output_file: Path of the text file written with one link per line.

    Returns:
        list[str]: The de-duplicated links that were written to disk.
    """
    all_links = []
    for page in range(1, total_pages + 1):
        page_url = f"{base_url}/page/{page}"
        print(f"\n🌐 Crawling page {page_url}...")
        links = fetch_video_links(page_url)
        all_links.extend(links)
        time.sleep(1)  # polite delay so you don't hammer the server
    # Remove duplicates. NOTE: using set() loses the discovery order.
    all_links = list(set(all_links))
    # Save all to file
    with open(output_file, "w", encoding="utf-8") as f:
        for link in all_links:
            f.write(link + "\n")
    print(f"\n✅ Done! Saved {len(all_links)} unique video links to {output_file}")
    return all_links


# BUG (removed): a stray `for link in all_links: r = requests.get(link)` loop
# sat after the return statement — it either never executed or, if it was at
# module level, crashed with a NameError. Deleted as dead code.
# Example usage — guarded so importing this module does not start a crawl:
if __name__ == "__main__":
    crawl_all_pages("https://striphub.cam", total_pages=5)

@ -0,0 +1,91 @@
import requests
from bs4 import BeautifulSoup
import time
# Prefix identifying the mxdrop embed URLs this crawler collects.
MXDROP_PREFIX = "https://mxdrop.to/e/"


def build_session():
    """Return a requests.Session preloaded with browser-like default headers."""
    session = requests.Session()
    browser_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.9",
        "Connection": "keep-alive",
    }
    session.headers.update(browser_headers)
    return session
def fetch_video_links(session, page_url, base_domain="https://striphub.cam"):
    """Collect /play/ links from one paginated list page."""
    try:
        resp = session.get(page_url, timeout=10)
        resp.raise_for_status()
    except requests.RequestException as err:
        print(f"{page_url} failed: {err}")
        return []
    doc = BeautifulSoup(resp.text, "html.parser")
    # Absolute URLs for every anchor pointing at a /play/ detail page.
    links = [
        base_domain + anchor["href"]
        for anchor in doc.find_all("a", href=True)
        if anchor["href"].startswith("/play/")
    ]
    print(f"{page_url}: {len(links)} /play/ links")
    return links
def extract_iframe_links(session, play_url):
    """Open a /play/ page and collect all <iframe src="https://mxdrop.to/e/...">."""
    try:
        resp = session.get(play_url, timeout=12, headers={"Referer": play_url})
        resp.raise_for_status()
    except requests.RequestException as err:
        print(f"❌ could not fetch {play_url}: {err}")
        return []
    doc = BeautifulSoup(resp.text, "html.parser")
    embeds = []
    for frame in doc.find_all("iframe", src=True):
        src = frame["src"]
        if src.startswith(MXDROP_PREFIX):
            embeds.append(src)
    if not embeds:
        # Surface the page title to help diagnose why no embeds were found.
        if doc.title and doc.title.string:
            title = doc.title.string.strip()
        else:
            title = "(no title)"
        print(f"🚫 {play_url}: no iframes, title={title}")
    else:
        print(f"🔗 {play_url}: found {len(embeds)} mxdrop embeds")
    return embeds
def crawl_all_pages(base_url, total_pages, base_domain="https://striphub.cam",
                    video_out="video_links.txt", embed_out="embedLinks.txt"):
    """Crawl listing pages, save /play/ links, then harvest mxdrop embeds.

    Args:
        base_url: Listing origin used to build ``/page/<n>`` URLs.
        total_pages: Number of listing pages to crawl.
        base_domain: Origin prepended to relative ``/play/`` hrefs.
        video_out: Output file for the /play/ links (one per line).
        embed_out: Output file for the mxdrop embed URLs (one per line).

    Returns:
        tuple: ``(sorted /play/ links, sorted embed URLs)``.
    """
    session = build_session()
    # 1) gather all /play/ links from the paginated listing
    all_play = []
    for p in range(1, total_pages + 1):
        page_url = f"{base_url}/page/{p}"
        print(f"\n🌐 Crawling {page_url}")
        all_play.extend(fetch_video_links(session, page_url, base_domain))
        time.sleep(0.5)  # polite delay between listing requests
    all_play = sorted(set(all_play))
    with open(video_out, "w", encoding="utf-8") as f:
        f.writelines(link + "\n" for link in all_play)
    print(f"\n✅ saved {len(all_play)} /play/ links → {video_out}")
    # 2) visit each /play/ page and collect mxdrop iframes
    all_embeds = set()
    for i, link in enumerate(all_play, 1):
        embeds = extract_iframe_links(session, link)
        all_embeds.update(embeds)
        print(f"[{i}/{len(all_play)}] total embeds: {len(all_embeds)}")
        time.sleep(0.5)
    with open(embed_out, "w", encoding="utf-8") as f:
        f.writelines(e + "\n" for e in sorted(all_embeds))
    print(f"\n✅ saved {len(all_embeds)} mxdrop embeds → {embed_out}")
    return all_play, sorted(all_embeds)


# Example usage — guarded so importing this module does not start a crawl
# (previously this ran unconditionally at import time):
if __name__ == "__main__":
    crawl_all_pages("https://striphub.cam", total_pages=5)

@ -0,0 +1,137 @@
# playwright_iframe_extract_debug.py
from playwright.sync_api import sync_playwright
from bs4 import BeautifulSoup
import re, time, os, sys
TEST_URL = "https://striphub.cam/play/68f731ea62f66877cc80e54f"  # replace if needed
# Case-insensitive matcher for mxdrop embed URLs; stops at whitespace,
# quotes and angle brackets so it can be run over raw HTML/JS text.
MX_PATTERN = re.compile(r"https?://(?:www\.)?mxdrop\.to/e/[^\s\"'<>]+", re.I)


def extract_from_html(html: str):
    """Regex fallback: return the set of mxdrop URLs embedded anywhere in *html*."""
    hits = MX_PATTERN.findall(html)
    return set(hits)
def run():
    """Debug harness: open TEST_URL in Chromium and hunt for mxdrop embed URLs.

    Searches via four independent channels — network responses, DOM iframes
    (src and data-src), a regex over the raw HTML, and Playwright's frame
    list — then prints the union and saves it to embedLinks.txt.
    Side effects: writes debug_page.png, debug_page.html and embedLinks.txt
    in the current directory.
    """
    found = set()  # union of mxdrop URLs discovered by any channel
    with sync_playwright() as p:
        # Use headless=False while debugging to *see* what's happening
        browser = p.chromium.launch(headless=False, args=["--disable-blink-features=AutomationControlled"])
        context = browser.new_context(
            user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            locale="en-US",
            viewport={"width": 1280, "height": 900}
        )
        page = context.new_page()
        # 1) Capture ANY network response hitting mxdrop
        def on_response(resp):
            url = resp.url
            if "mxdrop.to/e/" in url:
                print("[NET] mxdrop response:", url)
                found.add(url)
        page.on("response", on_response)
        print("Navigating to", TEST_URL)
        page.goto(TEST_URL, wait_until="domcontentloaded", timeout=60000)
        # 2) Give the page time to settle network/XHR
        try:
            page.wait_for_load_state("networkidle", timeout=20000)
        except Exception:
            print("⚠️ networkidle timed out—continuing")
        # 3) Try a few generic clicks that often reveal the player/iframe
        #    (No-ops if not present; they just fail silently)
        for sel in [
            'button:has-text("Play")',
            'button:has-text("I understand")',
            'button:has-text("Continue")',
            'button:has-text("Accept")',
            "#player, .video-player, .plyr__control",
        ]:
            try:
                el = page.locator(sel)
                if el.count() > 0:
                    el.first.click(timeout=2000)
            except Exception:
                pass
        # 4) Scroll to bottom to trigger lazy-load iframes
        try:
            page.evaluate("""
            const delay = ms => new Promise(r => setTimeout(r, ms));
            (async () => {
                for (let y = 0; y < document.body.scrollHeight; y += 800) {
                    window.scrollTo(0, y);
                    await delay(200);
                }
                window.scrollTo(0, document.body.scrollHeight);
            })();
            """)
            time.sleep(1.0)  # let any lazy iframes attach after the scroll
        except Exception:
            pass
        # 5) Dump a screenshot & HTML so you can inspect what loaded
        try:
            page.screenshot(path="debug_page.png", full_page=True)
            print("Saved screenshot -> debug_page.png")
        except Exception:
            pass
        html = page.content()
        with open("debug_page.html", "w", encoding="utf-8") as f:
            f.write(html)
        print("Saved HTML -> debug_page.html")
        # 6) Parse DOM for iframes (src and data-src)
        soup = BeautifulSoup(html, "html.parser")
        iframes = soup.find_all("iframe")
        print(f"Found {len(iframes)} <iframe> tags in DOM")
        for iframe in iframes:
            for attr in ("src", "data-src"):
                val = iframe.get(attr)
                if val and "mxdrop.to/e/" in val:
                    print("[DOM] iframe", attr, "=", val)
                    found.add(val)
        # 7) Regex over the full HTML (catches script-injected strings)
        regex_hits = extract_from_html(html)
        for u in regex_hits:
            print("[HTML-REGEX]", u)
        found.update(regex_hits)
        # 8) Also list all frame URLs Playwright sees (nested frames)
        for fr in page.frames:
            if fr.url and "mxdrop.to/e/" in fr.url:
                print("[FRAME] url:", fr.url)
                found.add(fr.url)
        # 9) Print final result
        found = sorted(found)
        print("\n==== MXDROP RESULTS ====")
        if found:
            for u in found:
                print(u)
        else:
            title = soup.title.string.strip() if soup.title and soup.title.string else "(no title)"
            print("No mxdrop links detected.")
            print("Page title:", title)
            # Quick hint if you hit a challenge:
            snippet = html[:400].replace("\n", " ")
            if "Just a moment" in snippet or "Cloudflare" in snippet or "cf-chl" in snippet:
                print("Looks like a Cloudflare challenge / interstitial (human step required).")
        # 10) Save results if any
        if found:
            with open("embedLinks.txt", "w", encoding="utf-8") as f:
                for u in found:
                    f.write(u + "\n")
            print("Saved -> embedLinks.txt")
        browser.close()


if __name__ == "__main__":
    run()

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

@ -0,0 +1,137 @@
# playwright_iframe_extract_debug.py
from playwright.sync_api import sync_playwright
from bs4 import BeautifulSoup
import re, time, os, sys
TEST_URL = "https://example.com/play/68f731ea62f66877cc80e54f"  # replace if needed
# Case-insensitive matcher for mxdrop embed URLs; stops at whitespace,
# quotes and angle brackets so it can be run over raw HTML/JS text.
MX_PATTERN = re.compile(r"https?://(?:www\.)?mxdrop\.to/e/[^\s\"'<>]+", re.I)


def extract_from_html(html: str):
    """Regex fallback: return the set of mxdrop URLs embedded anywhere in *html*."""
    hits = MX_PATTERN.findall(html)
    return set(hits)
def run():
    """Debug harness: open TEST_URL in Chromium and hunt for mxdrop embed URLs.

    Searches via four independent channels — network responses, DOM iframes
    (src and data-src), a regex over the raw HTML, and Playwright's frame
    list — then prints the union and saves it to embedLinks.txt.
    Side effects: writes debug_page.png, debug_page.html and embedLinks.txt
    in the current directory.
    """
    found = set()  # union of mxdrop URLs discovered by any channel
    with sync_playwright() as p:
        # Use headless=False while debugging to *see* what's happening
        browser = p.chromium.launch(headless=False, args=["--disable-blink-features=AutomationControlled"])
        context = browser.new_context(
            user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            locale="en-US",
            viewport={"width": 1280, "height": 900}
        )
        page = context.new_page()
        # 1) Capture ANY network response hitting mxdrop
        def on_response(resp):
            url = resp.url
            if "mxdrop.to/e/" in url:
                print("[NET] mxdrop response:", url)
                found.add(url)
        page.on("response", on_response)
        print("Navigating to", TEST_URL)
        page.goto(TEST_URL, wait_until="domcontentloaded", timeout=60000)
        # 2) Give the page time to settle network/XHR
        try:
            page.wait_for_load_state("networkidle", timeout=20000)
        except Exception:
            print("⚠️ networkidle timed out—continuing")
        # 3) Try a few generic clicks that often reveal the player/iframe
        #    (No-ops if not present; they just fail silently)
        for sel in [
            'button:has-text("Play")',
            'button:has-text("I understand")',
            'button:has-text("Continue")',
            'button:has-text("Accept")',
            "#player, .video-player, .plyr__control",
        ]:
            try:
                el = page.locator(sel)
                if el.count() > 0:
                    el.first.click(timeout=2000)
            except Exception:
                pass
        # 4) Scroll to bottom to trigger lazy-load iframes
        try:
            page.evaluate("""
            const delay = ms => new Promise(r => setTimeout(r, ms));
            (async () => {
                for (let y = 0; y < document.body.scrollHeight; y += 800) {
                    window.scrollTo(0, y);
                    await delay(200);
                }
                window.scrollTo(0, document.body.scrollHeight);
            })();
            """)
            time.sleep(1.0)  # let any lazy iframes attach after the scroll
        except Exception:
            pass
        # 5) Dump a screenshot & HTML so you can inspect what loaded
        try:
            page.screenshot(path="debug_page.png", full_page=True)
            print("Saved screenshot -> debug_page.png")
        except Exception:
            pass
        html = page.content()
        with open("debug_page.html", "w", encoding="utf-8") as f:
            f.write(html)
        print("Saved HTML -> debug_page.html")
        # 6) Parse DOM for iframes (src and data-src)
        soup = BeautifulSoup(html, "html.parser")
        iframes = soup.find_all("iframe")
        print(f"Found {len(iframes)} <iframe> tags in DOM")
        for iframe in iframes:
            for attr in ("src", "data-src"):
                val = iframe.get(attr)
                if val and "mxdrop.to/e/" in val:
                    print("[DOM] iframe", attr, "=", val)
                    found.add(val)
        # 7) Regex over the full HTML (catches script-injected strings)
        regex_hits = extract_from_html(html)
        for u in regex_hits:
            print("[HTML-REGEX]", u)
        found.update(regex_hits)
        # 8) Also list all frame URLs Playwright sees (nested frames)
        for fr in page.frames:
            if fr.url and "mxdrop.to/e/" in fr.url:
                print("[FRAME] url:", fr.url)
                found.add(fr.url)
        # 9) Print final result
        found = sorted(found)
        print("\n==== MXDROP RESULTS ====")
        if found:
            for u in found:
                print(u)
        else:
            title = soup.title.string.strip() if soup.title and soup.title.string else "(no title)"
            print("No mxdrop links detected.")
            print("Page title:", title)
            # Quick hint if you hit a challenge:
            snippet = html[:400].replace("\n", " ")
            if "Just a moment" in snippet or "Cloudflare" in snippet or "cf-chl" in snippet:
                print("Looks like a Cloudflare challenge / interstitial (human step required).")
        # 10) Save results if any
        if found:
            with open("embedLinks.txt", "w", encoding="utf-8") as f:
                for u in found:
                    f.write(u + "\n")
            print("Saved -> embedLinks.txt")
        browser.close()


if __name__ == "__main__":
    run()

@ -0,0 +1,7 @@
import requests

# One-off debug script: fetch the embed player page and dump its raw HTML
# to a local file for offline inspection.
url = "https://tubeasiancams.com/embed/player-6w.php?token=ez0SLbAi85b0Oje3m7YA75ilxdtoQzrHN%2B3Be1YMFqoCMLLWBmQE%2BOKohasr08C61WCxSCFrujvs1q5ifTyvyuo4Xq7uLVzP2Ypc7TrRymDYu8cqs%2BepKqkIzh8caDxZDRXedt9%2B3JXNxAFDfsEw%2Fw%3D%3D&poster=https://tubeasiancams.com/wp-content/uploads/2025/10/52r85ux6mdtf-640x360.jpg&id=231245"
data = requests.get(url, timeout=15)  # fix: no timeout meant a hung connection blocked forever
data.raise_for_status()  # fix: fail loudly instead of silently saving an error page
with open("html.text", 'w', encoding='utf-8') as f:
    f.write(data.text)

@ -0,0 +1,31 @@
import requests

# SECURITY(review): a real-looking API key is committed here — rotate it and
# load it from an environment variable instead of source control.
API_KEY = "1058i9fguonl5ibz854n"  # replace with your real key
API_URL = "https://abstream.to/api/upload/url"
VIDEO_URL = "https://abstream.to/embed/s5nntho2s6rk"  # replace with the video embed/download URL
# Optional fields:
params = {
    "key": API_KEY,
    "url": VIDEO_URL,
    "file_public": 1,  # 1 = public, 0 = private
    "file_adult": 1,   # 1 = adult, 0 = safe
    "tags": "test, upload"  # optional tags
}
response = requests.get(API_URL, params=params, timeout=30)  # fix: missing timeout could hang forever
if response.status_code == 200:
    data = response.json()
    # The API wraps its own status code inside the JSON body.
    if data["status"] == 200:
        filecode = data["result"]["filecode"]
        print(f"✅ Upload started successfully! File code: {filecode}")
        print(f"Embed page: https://abstream.to/{filecode}.html")
    else:
        # .get avoids a secondary KeyError if the API omits "msg" on failure
        print("❌ API error:", data.get("msg"))
else:
    print("❌ Request failed:", response.status_code, response.text)

@ -0,0 +1,34 @@
import requests

# Your API credentials
# NOTE(review): real credentials committed to source — move to env vars.
login = "xpornium.unstopped516@passinbox.com"
key = "cc68a4602270850382925da35aadab72"

# The direct video URL (or embed link that points to the file)
video_url = "https://cmb-202.filestore.app/4a8d5985765a8/edcbaa88ef611/alternative_resolution_c7d5e3f0d6372_360p.mp4?temp_url_expires=1761075762&temp_url_id=18c469ed-f17b-4f12-b57a-f1d7107912b7&countable=true&filename=littlehapp+Tits+Show.mp4&inline=true&content_type=video%2Fmp4&concurrency=32&rate_limit=0&response_limit=31332000&ip_access_policy=everyone&tags=file_type%3Avideo_360p%2Caction%3Alimited_view%2Cproject%3Atz%2Cowner_id%3A1556%2Csite_owner_id%3A1556%2Csize%3A242565119%2Cfile_id%3Afed8abf418351%2Csite_id%3A21828%2Cuser_pc_id%3Acb106f428ab63%2Cis_partner_side%3Afalse%2Ccts%3A1760989362%2Cdevice_type%3Adesktop%2Cuser_os%3AWindows%2Cuser_file_id%3Afed8abf418351%2Cuser_id%3A&temp_url_issuer=5445f78a91de707b297e67ed&temp_url_sig=0b49ec49d60be6977faf16300224c11e1a548462eb8dcb13291aa3ad9b4731247995c10ade9cbcb928f098e6767c30677dfe91889f883c99f5f4e38ee8d01e6e&client_ip="

# Optional: title and category
title = "My test upload"
cat = 1  # 1 = Straight

# Construct the request URL
api_url = "https://api.xpornium.net/remoteupload"
parameters = {
    "login": login,
    "key": key,
    "url": video_url,
    "cat": cat,
    "title": title
}

# Send the GET request (fix: timeout so a dead API can't hang the script)
response = requests.get(api_url, params=parameters, timeout=60)
data = response.json()

# Check result
if data["status"] == 200:
    upload_id = data["result"]["id"]
    print(f"✅ Remote upload started! Upload ID: {upload_id}")
else:
    # Fix: .get avoids a KeyError when the API returns no 'error' field.
    print(f"❌ Error: {data.get('error')}")

@ -0,0 +1,8 @@
# MySQL connection settings for the crawler database
DB_HOST=192.168.0.27
DB_PORT=3306
DB_USER=idanigga
DB_PASS=j0cy8c311024x3
DB_NAME=crawler
# Xpornium API credentials (used by xpornium.py / crawler scripts)
XPORNIUM_LOGIN=xpornium.unstopped516@passinbox.com
XPORNIUM_API_KEY=cc68a4602270850382925da35aadab72

@ -0,0 +1,17 @@
import mysql.connector, os
from dotenv import load_dotenv
load_dotenv()
def get_db_connection():
    """Open a MySQL connection from the DB_* environment variables.

    Returns:
        A live mysql.connector connection, or None if the connection
        attempt failed (callers must check for None before use).
    """
    try:
        # Fix: DB_PORT is defined in .env but was never read; fall back to
        # MySQL's default port when unset.
        db = mysql.connector.connect(
            host=os.getenv("DB_HOST"),
            port=int(os.getenv("DB_PORT", "3306")),
            user=os.getenv("DB_USER"),
            password=os.getenv("DB_PASS"),
            database=os.getenv("DB_NAME"),
        )
    except mysql.connector.Error as err:
        print(f"❌ Failed to connect to DB: {err}")
        return None  # don't continue if DB failed
    return db

@ -0,0 +1,133 @@
from config import get_db_connection
import requests, time, mysql.connector
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from datetime import datetime
def insert_video_to_db(data):
    """Insert one scraped video row into the `videos` table.

    `data` must contain the keys username, url, title, date, embed_link
    and source_url. Duplicate rows are skipped via INSERT IGNORE.
    """
    db = get_db_connection()
    if db is None:
        # Fix: get_db_connection() returns None on failure; the original
        # crashed here with AttributeError on db.cursor().
        return
    cursor = None
    try:
        cursor = db.cursor()
        sql = """
            INSERT IGNORE INTO videos (username, url, title, date, embed_link, source_url, created_at)
            VALUES (%s, %s, %s, %s, %s, %s, NOW())
        """
        values = (
            data['username'],
            data['url'],
            data['title'],
            data['date'],
            data['embed_link'],
            data['source_url']
        )
        cursor.execute(sql, values)
        db.commit()
        print("✅ Inserted into DB!")
    except mysql.connector.Error as err:
        print(f"❌ Failed to insert: {err}")
    finally:
        # Fix: `cursor` was unbound here if db.cursor() itself raised.
        if cursor is not None:
            cursor.close()
        db.close()
def crawl_user_page(base_url, user_path):
    """Fetch one model page and return its parsed fields, or None on failure.

    Bug fix: the original built the soup but returned nothing, so
    crawl_all() always received None and skipped every single user.
    The returned dict carries all keys insert_video_to_db() requires.
    """
    full_url = urljoin(base_url, user_path)
    # Fix: timeout so one dead page cannot hang the whole crawl.
    response = requests.get(full_url, timeout=30)
    if response.status_code != 200:
        print(f"❌ Failed to load {full_url}")
        return None
    soup = BeautifulSoup(response.text, "html.parser")
    username = user_path.strip("/")
    title_tag = soup.find("h1", class_="entry-title")
    title = title_tag.text.strip() if title_tag else "(no title)"
    date_tag = soup.find("span", class_="entry-date")
    date = date_tag.text.strip() if date_tag else None
    if date:
        try:
            # Site shows DD/MM/YYYY; the DB column expects ISO YYYY-MM-DD.
            date = datetime.strptime(date, "%d/%m/%Y").strftime("%Y-%m-%d")
        except ValueError:
            print(f"⚠️ Failed to parse date: {date}")
            date = None
    embed_link = None
    for iframe in soup.find_all("iframe", src=True):
        if "xpornium.net" in iframe["src"]:
            embed_link = iframe["src"]  # already absolute, no urljoin needed
            break  # stop after finding the first match
    print(f"\n✅ Scraped {username}: — {date}")
    return {
        "username": username,
        "url": full_url,
        "title": title,
        "date": date,
        "embed_link": embed_link,
        "source_url": full_url,
    }
def parse_data(soup, user_path=""):
    """Extract username/title/date/embed link from a parsed model page.

    Bug fix: `user_path` was a free (undefined) name in the original and
    raised NameError on every call; it is now a backward-compatible
    parameter (default "").
    """
    username = user_path.strip("/")
    title_tag = soup.find("h1", class_="entry-title")
    title = title_tag.text.strip() if title_tag else "(no title)"
    date_tag = soup.find("span", class_="entry-date")
    date = date_tag.text.strip() if date_tag else None
    if date:
        try:
            # Site shows DD/MM/YYYY; normalize to ISO YYYY-MM-DD for the DB.
            date_obj = datetime.strptime(date, "%d/%m/%Y")
            date = date_obj.strftime("%Y-%m-%d")
        except ValueError:
            print(f"⚠️ Failed to parse date: {date}")
            date = None
    embed_link = None
    for iframe in soup.find_all("iframe", src=True):
        src = iframe["src"]
        if "xpornium.net" in src:
            embed_link = src  # no urljoin needed!
            break  # stop after finding the first match
    # --- print info after crawling this user ---
    print(f"\n✅ Scraped {username}: — {date}")
    # -------------------------------------------
    return {
        "username": username,
        "title": title,
        "date": date,
        "embed_link": embed_link,
    }
def crawl_all(init_url):
    """Crawl page by page and extract user data as we go.

    Returns:
        The list of user dicts that were successfully scraped and stored.
    """
    page = 1
    all_data = []
    while True:
        url = f"{init_url}?p={page}"
        print(f"\n🕷️ Crawling index page {page}: {url}")
        # Fix: timeout so a dead server cannot hang the crawl forever.
        response = requests.get(url, timeout=30)
        if response.status_code != 200:
            print(f"❌ Page {page} returned {response.status_code}, stopping.")
            break
        soup = BeautifulSoup(response.text, "html.parser")
        user_links = soup.find_all("a", class_="thumbnail-link", href=True)
        if not user_links:
            print("⚠️ No user links found — reached end of site.")
            break
        for link in user_links:
            user_path = link["href"]
            user_data = crawl_user_page(init_url, user_path)
            if not user_data:
                print("⚠️ Skipping empty user_data.")
                continue
            if not user_data["embed_link"]:
                print(f"⚠️ Skipping {user_data['username']} - no embed link found.")
                continue
            insert_video_to_db(user_data)
            # Fix: all_data was declared and returned but never populated,
            # so the final count was always 0 and callers got nothing back.
            all_data.append(user_data)
            time.sleep(0.5)  # be polite between user pages
        page += 1
        time.sleep(1)  # be polite between index pages
    print(f"\n✅ Finished crawling all pages. Total users: {len(all_data)}")
    return all_data
# Script entry point: crawl the whole site and persist the results.
if __name__ == "__main__":
    BASE_URL = "https://webcamrips.to"
    results = crawl_all(BASE_URL)
    # Fix: the original printed this message without ever writing the file.
    import json
    with open("users_data.json", "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    print("💾 All data saved to users_data.json")

@ -0,0 +1,208 @@
import requests, os, time, mysql.connector, json
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from datetime import datetime
from dotenv import load_dotenv
from xpornium import get_file_info, upload_video, get_upload_url, remote_upload
load_dotenv()
def insert_video_to_db(data):
    """Insert one scraped video row into the `videos` table.

    `data` must contain the keys username, url, title, date, embed_link
    and source_url. Duplicate rows are skipped via INSERT IGNORE.
    """
    try:
        db = mysql.connector.connect(
            host=os.getenv("DB_HOST"),
            user=os.getenv("DB_USER"),
            password=os.getenv("DB_PASS"),
            database=os.getenv("DB_NAME"),
        )
    except mysql.connector.Error as err:
        print(f"❌ Failed to connect to DB: {err}")
        return  # don't continue if DB failed
    cursor = None
    try:
        cursor = db.cursor()
        sql = """
            INSERT IGNORE INTO videos (username, url, title, date, embed_link, source_url, created_at)
            VALUES (%s, %s, %s, %s, %s, %s, NOW())
        """
        values = (
            data['username'],
            data['url'],
            data['title'],
            data['date'],
            data['embed_link'],
            data['source_url']
        )
        cursor.execute(sql, values)
        db.commit()
        print("✅ Inserted into DB!")
    except mysql.connector.Error as err:
        print(f"❌ Failed to insert: {err}")
    finally:
        # Fix: `cursor` was unbound here if db.cursor() itself raised.
        if cursor is not None:
            cursor.close()
        db.close()
def save_xpornium_upload(embed_link, fileid, xpornium_url, title, cat_id, duration, thumbnail):
    """Record a completed Xpornium remote upload in the `xpornium_uploads` table.

    Builds the canonical embed URL from `fileid` and stores it alongside the
    original embed link, metadata and an upload timestamp.
    """
    db = None
    cursor = None
    try:
        db = mysql.connector.connect(
            host=os.getenv("DB_HOST"),
            user=os.getenv("DB_USER"),
            password=os.getenv("DB_PASS"),
            database=os.getenv("DB_NAME")
        )
        cursor = db.cursor()
        new_embed_link = f"https://xpornium.net/embed/{fileid}"
        sql = """
            INSERT INTO xpornium_uploads
            (original_embed_link, xpornium_fileid, xpornium_url, new_embed_link, title, category_id, uploaded_at, duration, thumbnail)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        values = (
            embed_link,
            fileid,
            xpornium_url,
            new_embed_link,
            title,
            cat_id,
            datetime.now(),
            duration,
            thumbnail
        )
        cursor.execute(sql, values)
        db.commit()
        print(f"✅ Saved to DB: {fileid} | duration: {duration}s | thumb: {thumbnail}")
    except Exception as e:
        print(f"❌ DB Save Failed: {e}")
    finally:
        # Fix: `db`/`cursor` were unbound (NameError) here when connect failed.
        if db is not None and db.is_connected():
            if cursor is not None:
                cursor.close()
            db.close()
def crawl_user_page(base_url, user_path):
    """Visits one 'url/username' page and extracts info.

    Returns:
        A dict with username, url, title, date (ISO or None), embed_link
        (first xpornium iframe src, or None) and source_url — or None when
        the page could not be fetched.
    """
    full_url = urljoin(base_url, user_path)
    # Fix: timeout so one dead page cannot hang the whole crawl.
    response = requests.get(full_url, timeout=30)
    if response.status_code != 200:
        print(f"❌ Failed to load {full_url}")
        return None
    soup = BeautifulSoup(response.text, "html.parser")
    username = user_path.strip("/")
    title_tag = soup.find("h1", class_="entry-title")
    title = title_tag.text.strip() if title_tag else "(no title)"
    source_url = full_url
    date_tag = soup.find("span", class_="entry-date")
    date = date_tag.text.strip() if date_tag else None
    # Convert DD/MM/YYYY → YYYY-MM-DD
    if date:
        try:
            date_obj = datetime.strptime(date, "%d/%m/%Y")
            date = date_obj.strftime("%Y-%m-%d")
        except ValueError:
            print(f"⚠️ Failed to parse date: {date}")
            date = None
    embed_link = None
    for iframe in soup.find_all("iframe", src=True):
        src = iframe["src"]
        if "xpornium.net" in src:
            embed_link = src  # no urljoin needed!
            break  # stop after finding the first match
    # --- print info after crawling this user ---
    print(f"\n✅ Scraped {username}: — {date}")
    # -------------------------------------------
    return {
        "username": username,
        "url": full_url,
        "title": title,
        "date": date,
        "embed_link": embed_link,
        "source_url": source_url
    }
def crawl_all(init_url):
    """Crawl page by page, extract user data, store it, and mirror to Xpornium.

    Returns:
        The list of user dicts that were successfully scraped and stored.
    """
    page = 1
    all_data = []
    while True:
        url = f"{init_url}?p={page}"
        print(f"\n🕷 Crawling index page {page}: {url}")
        # Fix: timeout so a dead server cannot hang the crawl forever.
        response = requests.get(url, timeout=30)
        if response.status_code != 200:
            print(f"❌ Page {page} returned {response.status_code}, stopping.")
            break
        soup = BeautifulSoup(response.text, "html.parser")
        user_links = soup.find_all("a", class_="thumbnail-link", href=True)
        if not user_links:
            print("⚠️ No user links found — reached end of site.")
            break
        for link in user_links:
            user_path = link["href"]
            user_data = crawl_user_page(init_url, user_path)
            if not user_data:
                print("⚠️ Skipping empty user_data.")
                continue
            if not user_data["embed_link"]:
                print(f"⚠️ Skipping {user_data['username']} - no embed link found.")
                continue
            insert_video_to_db(user_data)
            # Get fileid and xpornium url
            fileid = user_data["embed_link"].split("/")[-1]
            xpornium_url = f"https://xpornium.net/embed/{fileid}"
            # NOTE(review): the remote-upload response is discarded and
            # fileinfo is queried immediately — confirm the API returns
            # metadata before the transfer completes.
            remote_upload(xpornium_url)
            # Get file info (duration, thumb)
            info_response = get_file_info(fileid)
            info_json = info_response.json()
            if info_json.get("status") != 200 or not info_json.get("result"):
                print(f"❌ Failed to get file info for {fileid}")
                continue
            info = info_json["result"][0]
            duration = info.get("duration") or 0
            thumbnail = info.get("thumbnail") or ""
            # Save to DB
            save_xpornium_upload(user_data["embed_link"], fileid, xpornium_url, user_data["title"], 127, duration, thumbnail)
            # Fix: all_data was declared and returned but never populated,
            # so the final count was always 0 and callers got nothing back.
            all_data.append(user_data)
            time.sleep(0.5)  # be polite between user pages
        page += 1
        time.sleep(1)  # be polite between index pages
    print(f"\n✅ Finished crawling all pages. Total users: {len(all_data)}")
    return all_data
# Script entry point: crawl the whole site and persist the results.
if __name__ == "__main__":
    BASE_URL = "https://webcamrips.to"
    results = crawl_all(BASE_URL)
    # Fix: the original printed this message without ever writing the file
    # (json is imported at the top of this module).
    with open("users_data.json", "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    print("💾 All data saved to users_data.json")

@ -0,0 +1,78 @@
import requests
# Xpornium API account login.
LOGIN='xpornium.unstopped516@passinbox.com'
# NOTE(review): API key committed to source — move to an environment variable
# (the .env file already defines XPORNIUM_API_KEY).
KEY='cc68a4602270850382925da35aadab72'
# Category id sent with every upload.
VIDEO_CATEGORY=127
def get_file_info(file_id):
    """Fetch metadata for an uploaded file (callers parse .json()).

    Returns the raw requests.Response from the /fileinfo endpoint.
    """
    params = {
        "login": LOGIN,
        "key": KEY,
        "id": file_id
    }
    url = "https://api.xpornium.net/fileinfo"
    # Fix: timeout so a dead API endpoint cannot hang the caller forever.
    return requests.get(url, params=params, timeout=30)
def file_duplicate(file_id):
    """Call the /fileduplicate endpoint for `file_id`.

    Returns the raw requests.Response (callers parse .json()).
    """
    params = {
        'login': LOGIN,
        'key': KEY,
        'id': file_id
    }
    url = 'https://api.xpornium.net/fileduplicate'
    # Fix: timeout so a dead API endpoint cannot hang the caller forever.
    return requests.get(url, params=params, timeout=30)
def edit_file(file_id):
    """Call the /fileedit endpoint for `file_id`.

    Returns the raw requests.Response (callers parse .json()).
    """
    params = {
        'login': LOGIN,
        'key': KEY,
        'id': file_id
    }
    url = 'https://api.xpornium.net/fileedit'
    # Fix: timeout so a dead API endpoint cannot hang the caller forever.
    return requests.get(url, params=params, timeout=30)
def file_delete(file_id):
    """Call the /filedelete endpoint for `file_id`.

    Returns the raw requests.Response (callers parse .json()).
    """
    params = {
        'login': LOGIN,
        'key': KEY,
        'id': file_id
    }
    url = 'https://api.xpornium.net/filedelete'
    # Fix: timeout so a dead API endpoint cannot hang the caller forever.
    return requests.get(url, params=params, timeout=30)
def get_upload_url():
    """Request an upload URL from the /getupload endpoint.

    Returns the raw requests.Response (callers parse .json()).
    """
    params = {
        'login': LOGIN,
        'key': KEY,
    }
    url = 'https://api.xpornium.net/getupload'
    # Fix: timeout so a dead API endpoint cannot hang the caller forever.
    return requests.get(url, params=params, timeout=30)
def upload_video(upload_url, embed_link):
    """POST a video link to a previously obtained upload URL.

    Returns the raw requests.Response (callers parse .json()).

    NOTE(review): the payload is sent as query params ('file' is a link,
    not file contents) — confirm against the Xpornium API docs whether this
    should be form data / multipart instead.
    """
    params = {
        'login': LOGIN,
        'key': KEY,
        'cat': VIDEO_CATEGORY,
        'file': embed_link
    }
    # Fix: timeout so a dead API endpoint cannot hang the caller forever.
    return requests.post(upload_url, params=params, timeout=60)
def get_upload_status(file_id):
    """Poll the /remotestatus endpoint for a remote upload's progress.

    Returns the raw requests.Response (callers parse .json()).
    """
    params = {
        'login': LOGIN,
        'key': KEY,
        'id': file_id
    }
    url = 'https://api.xpornium.net/remotestatus'
    # Fix: timeout so a dead API endpoint cannot hang the caller forever.
    return requests.get(url, params=params, timeout=30)
def remote_upload(embed_link):
    """Ask Xpornium to remotely fetch and upload the video at `embed_link`.

    Returns the raw requests.Response (callers parse .json()).
    """
    params = {
        'login': LOGIN,
        'key': KEY,
        'url': embed_link
    }
    url = 'https://api.xpornium.net/remoteupload'
    # Fix: timeout so a dead API endpoint cannot hang the caller forever.
    return requests.get(url, params=params, timeout=60)
Loading…
Cancel
Save