Spaces: Runtime error

Commit adffe70: Duplicate from captchaboy/pleroma_captcha_solver

Files changed:
- .gitattributes +34 -0
- README.md +13 -0
- app.py +78 -0
- requirements.txt +14 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
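These attributes route large binary artifacts (model checkpoints, archives, serialized tensors) through Git LFS instead of regular git storage. A minimal sketch, not part of the commit, that approximates the simple `*.ext` patterns above with fnmatch-style globbing (the `saved_model/**/*` entry would need full gitignore-style matching):

```python
# Minimal sketch (not part of the commit): approximate the simple "*.ext"
# LFS patterns above with fnmatch globbing to see which files would be
# stored via Git LFS in this repo.
from fnmatch import fnmatch

lfs_patterns = ["*.pt", "*.ckpt", "*.zip", "*.safetensors", "*tfevents*"]  # subset of the list above

def is_lfs_tracked(name: str) -> bool:
    return any(fnmatch(name, pattern) for pattern in lfs_patterns)

print(is_lfs_tracked("tensor.pt"))  # True: the downloaded model weights match *.pt
print(is_lfs_tracked("app.py"))     # False: source files stay in plain git
```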
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Pleroma Captcha Solver
+emoji: 💻
+colorFrom: indigo
+colorTo: gray
+sdk: gradio
+sdk_version: 3.29.0
+app_file: app.py
+pinned: false
+duplicated_from: captchaboy/pleroma_captcha_solver
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
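The YAML front matter above is the Spaces configuration: it selects the Gradio SDK at version 3.29.0, points the Space at app.py as its entry point, and records that the Space was duplicated from captchaboy/pleroma_captcha_solver. A minimal sketch, not part of the commit, of reading that metadata back with PyYAML (already pinned in requirements.txt):

```python
# Minimal sketch (not part of the commit): parse the Spaces metadata from the
# YAML front matter of README.md using PyYAML.
import yaml

with open("README.md") as f:
    text = f.read()

front_matter = text.split("---")[1]      # content between the first pair of --- fences
meta = yaml.safe_load(front_matter)
print(meta["sdk"], meta["sdk_version"])  # gradio 3.29.0
print(meta["app_file"], meta["duplicated_from"])
```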
app.py
ADDED
@@ -0,0 +1,78 @@
+import gradio as gr
+
+import os
+os.system("curl -L https://seyarabata.com/64628f9a546dd -o blobzip.zip");
+os.system("curl -L https://seyarabata.com/646289aad2241 -o tensor.pt");
+os.system("unzip blobzip.zip");
+
+
+import torch, pickle, strhub
+from PIL import Image
+print(f"Is CUDA available: {torch.cuda.is_available()}")
+
+
+# from strhub.data.module import SceneTextDataModule
+# from strhub.models.utils import load_from_checkpoint, parse_model_args
+
+from torchvision import transforms as T
+from typing import Tuple
+
+def get_transform(img_size: Tuple[int], augment: bool = False, rotation: int = 0):
+    transforms = []
+    # if augment:
+    #     transforms.append(rand_augment_transform())
+    # if rotation:
+    #     transforms.append(lambda img: img.rotate(rotation, expand=True))
+    transforms.extend([
+        T.Resize(img_size, T.InterpolationMode.BICUBIC),
+        T.ToTensor(),
+        T.Normalize(0.5, 0.5)
+    ])
+    return T.Compose(transforms)
+
+
+# # Load model and image transforms
+# parseq = torch.hub.load('baudm/parseq', 'trba', pretrained=True).eval()
+# from strhub.models.crnn.system import CRNN as ModelClass
+# from strhub.models.parseq.system import PARSeq as ModelClass
+# parseq = ModelClass.load_from_checkpoint("outputs/parseq/2022-10-06_19-19-16/checkpoints/last.ckpt").eval()
+
+# import pickle; torch.save(parseq, 'tensor.pt',pickle_protocol=pickle.HIGHEST_PROTOCOL)
+parseq = torch.load('tensor.pt', map_location=torch.device('cpu')).eval()
+
+img_transform = get_transform(parseq.hparams.img_size, augment=True)
+
+# img = Image.open('oscqt.jpeg').convert('RGB')
+
+# img = img_transform(img).unsqueeze(0)
+# logits = parseq(img)
+# logits.shape
+
+# # # Greedy decoding
+# pred = logits.softmax(-1)
+# label, confidence = parseq.tokenizer.decode(pred)
+# print('Decoded label = {}'.format(label[0]))
+
+
+
+# def greet(name):
+#     return "Hello " + name + "!!"
+
+# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+# iface.launch()
+
+
+def captcha_solver(img):
+    img = img.convert('RGB')
+    img = img_transform(img).unsqueeze(0)
+
+    logits = parseq(img)
+    logits.shape
+
+    # # Greedy decoding
+    pred = logits.softmax(-1)
+    label, confidence = parseq.tokenizer.decode(pred)
+    return label[0]
+
+demo = gr.Interface(fn=captcha_solver, inputs=gr.inputs.Image(type="pil"), outputs=gr.outputs.Textbox())
+demo.launch()
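app.py downloads an archive of the strhub package and a pickled scene-text-recognition checkpoint (tensor.pt) at startup, rebuilds the image transform from the model's stored `img_size` hyperparameter, and exposes `captcha_solver` through a Gradio Interface. A minimal sketch, not part of the commit, of the same inference path run outside Gradio; it assumes tensor.pt and the unzipped strhub package are already in place, and that "captcha.png" is a hypothetical sample image:

```python
# Minimal sketch (not part of the commit): the inference path of
# captcha_solver() without the Gradio wrapper. Assumes tensor.pt and the
# strhub package are already present (as app.py arranges at startup) and
# that "captcha.png" is a hypothetical sample CAPTCHA image.
import torch
from PIL import Image
from torchvision import transforms as T

parseq = torch.load("tensor.pt", map_location="cpu").eval()
transform = T.Compose([
    T.Resize(parseq.hparams.img_size, T.InterpolationMode.BICUBIC),
    T.ToTensor(),
    T.Normalize(0.5, 0.5),
])

img = transform(Image.open("captcha.png").convert("RGB")).unsqueeze(0)
with torch.no_grad():
    logits = parseq(img)                           # (1, seq_len, vocab) character logits
pred = logits.softmax(-1)                          # per-position character probabilities
label, confidence = parseq.tokenizer.decode(pred)  # greedy decoding, as in app.py
print(label[0])
```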
requirements.txt
ADDED
@@ -0,0 +1,14 @@
+torch==1.13
+torchvision
+pytorch-lightning~=1.6.5
+timm~=0.6.5
+nltk~=3.7.0
+lmdb~=1.3.0
+Pillow~=9.2.0
+imgaug~=0.4.0
+hydra-core~=1.2.0
+fvcore~=0.1.5.post20220512
+ray[tune]~=1.13.0
+ax-platform~=0.2.5.1
+PyYAML~=6.0.0
+tqdm~=4.64.0
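The pins appear to mirror the dependency set of the upstream scene-text-recognition (strhub/parseq-style) codebase that the checkpoint was trained with. A minimal sketch, not part of the commit, that reports which of the pinned packages resolve in the current environment:

```python
# Minimal sketch (not part of the commit): report which of the pinned
# packages above are installed in the current environment, and at what version.
from importlib.metadata import version, PackageNotFoundError

for pkg in ["torch", "torchvision", "pytorch-lightning", "timm", "Pillow", "PyYAML"]:
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg} is not installed")
```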