Platform & Environment Configuration
Compatibility Block
Check Platform
Imports
Public Imports
from fastai.learner import load_learner
from fastai.vision.all import *
import os
import contextlib
import warnings
import panel as pn
from huggingface_hub import push_to_hub_fastai, notebook_login
from datasets import load_dataset
# from datasets import ImageFolder
warnings.filterwarnings("ignore")
pn.extension()
Private Imports
from aiking.core import aiking_settings
from aiking.data.external import *
Loading Learner
We have already trained our model in a previous notebook. We now quickly review the steps to verify the model.
dsname = 'PandemicSafety'
model_fname = "pandemic_v2.pkl"
learn = load_learner(aiking_path('model')/model_fname); learn
<fastai.learner.Learner>
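Before testing, it is worth confirming which classes the learner was trained on. A minimal sanity check, assuming the standard fastai Learner API; the vocab order also fixes the index positions in the probability tensor that predict() returns below.
# The DataLoaders vocab lists the class labels in index order
learn.dls.vocab  # ['Mask', 'No_Mask'] - positions 0 and 1 in predict()'s tensors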
Testing the model locally
uploader = pn.widgets.FileInput(); uploader
uploader.filename
'Asian-man-wearing-a-mask-is-sick-pointing-finger-PNG[2].png'
im = pn.pane.Image(uploader.value); im
learn.predict(uploader.value)
('No_Mask', tensor(1), tensor([0.4785, 0.5215]))
learn.predict(PILImage.create(uploader.value))
('No_Mask', tensor(1), tensor([0.4785, 0.5215]))
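learn.predict returns a tuple of the decoded label, the label index into the vocab, and the per-class probability tensor. As a small illustration of unpacking that tuple (describe_prediction is a hypothetical helper, not part of fastai):
def describe_prediction(learner, img_bytes):
    # predict returns (decoded_label, label_index, probabilities)
    label, idx, probs = learner.predict(PILImage.create(img_bytes))
    return f"{label} ({probs[idx].item()*100:.1f}% confidence)"

describe_prediction(learn, uploader.value)  # e.g. 'No_Mask (52.1% confidence)' for the image above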
uploader = pn.widgets.FileInput(); uploader
PILImage.create(uploader.value)
learn.predict(PILImage.create(uploader.value))
('Mask', tensor(0), tensor([9.9937e-01, 6.2690e-04]))
Warning
The model is not perfect. It fails to recognize the first image (a white dress, similar in colour to a mask) with the background removed, but correctly recognizes the second image with the background. One idea to improve the model is to show it more examples of similar types, if we can collect them. We can also try more epochs, RandomResizedCrop, and aug_transforms as discussed in the lecture; a sketch follows.
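For reference, this is roughly how those augmentations could be wired into a retraining pipeline. A minimal sketch only: the data path, the parent_label labelling scheme, and the crop size/min_scale values are assumptions, not taken from the original training notebook.
dblock = DataBlock(
    blocks=(ImageBlock, CategoryBlock),
    get_items=get_image_files,
    get_y=parent_label,
    splitter=RandomSplitter(valid_pct=0.2, seed=42),
    item_tfms=RandomResizedCrop(224, min_scale=0.5),  # random crops each epoch
    batch_tfms=aug_transforms(),                      # flips, rotation, lighting, etc.
)
# dls = dblock.dataloaders(aiking_path('data')/dsname); learn.fine_tune(5)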
Uploading Learner to Hugging Face Hub
@contextlib.contextmanager
def preserve_cwd(new_dir):
    curdir = os.getcwd()
    os.chdir(new_dir)
    try: yield
    finally: os.chdir(curdir)
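The helper temporarily switches the working directory so that any files created by the upload land elsewhere, and restores the original directory even if an error occurs. A quick usage example (the /tmp path is illustrative):
with preserve_cwd('/tmp'):
    print(os.getcwd())   # /tmp - files created here, not in the notebook dir
print(os.getcwd())       # original working directory is restored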
notebook_login()
= f"Model-{dsname}"; repo_name
repo_name = os.getenv('HUGGINGFACE_HUB_USERNAME') uname
with preserve_cwd(os.getenv('HUGGINGFACE_HUB_DIR')):
=learn, repo_id=f"{uname}/{repo_name}") push_to_hub_fastai(learner
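To verify the upload, the model can be pulled straight back from the Hub. A minimal sketch using from_pretrained_fastai, the download counterpart that huggingface_hub provides alongside push_to_hub_fastai:
from huggingface_hub import from_pretrained_fastai

learn_remote = from_pretrained_fastai(f"{uname}/{repo_name}")
learn_remote.predict(PILImage.create(uploader.value))  # should match the local result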
Uploading Dataset to Hugging Face Hub
= f"Dataset-{dsname}"; repo_name
repo_name = os.getenv('HUGGINGFACE_HUB_USERNAME') uname
= aiking_path('data')/f"{dsname}"; ds_dir.exists() ds_dir
True
ds = load_dataset('imagefolder', data_dir=ds_dir); ds
DatasetDict({
train: Dataset({
features: ['image', 'label'],
num_rows: 309
})
})
f"{uname}/{repo_name}", private=True) ds.push_to_hub(
Warning
Datasets have a specific format for image files. Additionally, I have not yet found a way to maintain versions, data-cleaning steps, etc. without copying the data multiple times. For now I intend to use this as a bin for saving downloaded images in Parquet format.