rararara9999 committed on
Commit c071fee · verified · 1 Parent(s): 68f31d1

Delete app.py

Files changed (1)
  1. app.py +0 -154
app.py DELETED
@@ -1,154 +0,0 @@
- import subprocess
-
- # Install the required packages
- subprocess.check_call(["pip", "install", "-U", "git+https://github.com/huggingface/transformers.git"])
- subprocess.check_call(["pip", "install", "-U", "git+https://github.com/huggingface/accelerate.git"])
- subprocess.check_call(["pip", "install", "datasets"])
- subprocess.check_call(["pip", "install", "evaluate"])
- subprocess.check_call(["pip", "install", "scikit-learn"])
- subprocess.check_call(["pip", "install", "torchvision"])
-
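- # Note: transformers/accelerate come from unpinned git main, so builds are not
- # reproducible; pinning releases (e.g. "transformers==4.46.0") would fix that.
-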
- model_checkpoint = "microsoft/resnet-50"
- batch_size = 128
-
- from datasets import load_dataset
- from evaluate import load
-
- metric = load("accuracy")
-
- # Load the dataset directly from Hugging Face
- dataset = load_dataset("DamarJati/Face-Mask-Detection")
- labels = dataset["train"].features["label"].names
- label2id, id2label = dict(), dict()
- for i, label in enumerate(labels):
-     label2id[label] = i
-     id2label[i] = label
-
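- # The checkpoint's image processor carries the normalization statistics and
- # expected input size for the pretrained weights.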
- from transformers import AutoImageProcessor
- image_processor = AutoImageProcessor.from_pretrained(model_checkpoint)
-
- from torchvision.transforms import (
-     CenterCrop,
-     ColorJitter,
-     Compose,
-     Normalize,
-     RandomHorizontalFlip,
-     RandomResizedCrop,
-     RandomRotation,
-     Resize,
-     ToTensor,
- )
-
- normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
-
- # Check if size is a dictionary with height and width keys
- if isinstance(image_processor.size, dict) and "height" in image_processor.size and "width" in image_processor.size:
-     size = (image_processor.size["height"], image_processor.size["width"])
- else:
-     size = (224, 224)  # Default size if not specified
-
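- # Training transforms apply random augmentation (crop, flip, rotation, color
- # jitter) so the model sees varied views of each image.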
- train_transforms = Compose(
-     [
-         RandomResizedCrop(size),
-         RandomHorizontalFlip(),
-         RandomRotation(degrees=15),
-         ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),
-         ToTensor(),
-         normalize,
-     ]
- )
-
- # Evaluation transforms are deterministic (no random augmentation), so the
- # validation metric is reproducible.
- val_transforms = Compose(
-     [
-         Resize(size),
-         CenterCrop(size),
-         ToTensor(),
-         normalize,
-     ]
- )
-
- def preprocess_train(example_batch):
-     example_batch["pixel_values"] = [
-         train_transforms(image.convert("RGB")) for image in example_batch["image"]
-     ]
-     return example_batch
-
- def preprocess_val(example_batch):
-     example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]]
-     return example_batch
-
- # Hold out 30% of the training set for validation
- splits = dataset["train"].train_test_split(test_size=0.3)
- train_ds = splits["train"]
- val_ds = splits["test"]
-
- # set_transform applies the preprocessing lazily, per batch, instead of
- # materializing transformed copies of the whole dataset up front
- train_ds.set_transform(preprocess_train)
- val_ds.set_transform(preprocess_val)
-
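- # Load the pretrained backbone with a fresh classification head sized to our
- # labels; ignore_mismatched_sizes lets it replace the 1000-class ImageNet head.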
- from transformers import AutoModelForImageClassification, TrainingArguments, Trainer
-
- model = AutoModelForImageClassification.from_pretrained(
-     model_checkpoint,
-     label2id=label2id,
-     id2label=id2label,
-     ignore_mismatched_sizes=True,
- )
-
- model_name = model_checkpoint.split("/")[-1]
-
- args = TrainingArguments(
-     f"{model_name}-finetuned",
-     remove_unused_columns=False,
-     eval_strategy="epoch",  # named evaluation_strategy in older transformers releases
-     save_strategy="epoch",
-     save_total_limit=5,
-     learning_rate=1e-3,
-     per_device_train_batch_size=batch_size,
-     gradient_accumulation_steps=2,
-     per_device_eval_batch_size=batch_size,
-     num_train_epochs=2,
-     warmup_ratio=0.1,
-     weight_decay=0.01,
-     lr_scheduler_type="cosine",
-     logging_steps=10,
-     load_best_model_at_end=True,
-     metric_for_best_model="accuracy",
- )
-
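- # Effective train batch size per device is
- # per_device_train_batch_size * gradient_accumulation_steps = 128 * 2 = 256.
-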
- import numpy as np
-
- def compute_metrics(eval_pred):
-     """Computes accuracy on a batch of predictions."""
-     # eval_pred.predictions are logits of shape (num_examples, num_labels);
-     # argmax over the label axis yields predicted class ids
-     predictions = np.argmax(eval_pred.predictions, axis=1)
-     return metric.compute(predictions=predictions, references=eval_pred.label_ids)
-
- import torch
-
- def collate_fn(examples):
-     pixel_values = torch.stack([example["pixel_values"] for example in examples])
-     labels = torch.tensor([example["label"] for example in examples])
-     return {"pixel_values": pixel_values, "labels": labels}
-
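- # Each collated batch is a dict of the form
- # {"pixel_values": float tensor (B, 3, H, W), "labels": long tensor (B,)},
- # which is what the model's forward pass expects.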
- trainer = Trainer(
-     model,
-     args,
-     train_dataset=train_ds,
-     eval_dataset=val_ds,
-     processing_class=image_processor,  # passed as tokenizer= in older transformers releases
-     compute_metrics=compute_metrics,
-     data_collator=collate_fn,
- )
-
- train_results = trainer.train()
- # Save model
- trainer.save_model()
- trainer.log_metrics("train", train_results.metrics)
- trainer.save_metrics("train", train_results.metrics)
- trainer.save_state()
-
- metrics = trainer.evaluate()
- # Log and save metrics
- trainer.log_metrics("eval", metrics)
- trainer.save_metrics("eval", metrics)
-
- # Print evaluation metrics
- print("Evaluation Metrics:")
- for key, value in metrics.items():
-     print(f"{key}: {value}")
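-
- # A minimal sketch for reloading the fine-tuned model later (path assumes the
- # default output dir created above; "example.jpg" is a placeholder):
- #   from transformers import pipeline
- #   classifier = pipeline("image-classification", model="resnet-50-finetuned")
- #   print(classifier("example.jpg"))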