import pandas as pd
import plotly.express as px
from fastcore.all import *
import streamlit as st
Platform & Environment Configuration
Check Platform
Compatibility Block
Imports
st.set_page_config(page_title="Which Image Model is best?", layout="wide")
from streamlit_jupyter import StreamlitPatcher, tqdm
def get_results(result_option = 'original'):
    suffix = "" if result_option == 'original' else "-real"
    url_results = f"https://github.com/huggingface/pytorch-image-models/raw/main/results/results-imagenet{suffix}.csv"
    df_results = pd.read_csv(url_results); df_results.head()
    df_results['model_org'] = df_results['model']                    # keep the original name for hover labels
    df_results['model'] = df_results['model'].str.split('.').str[0]  # drop the pretrained-tag suffix from the model name
    return df_results
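The .str.split('.').str[0] step strips the pretrained-tag suffix that newer timm results files append to model names, presumably so they can later be merged with the benchmark CSV on a plain model name. A minimal sketch of that rename (the model name below is illustrative, not taken from the CSV):

s = pd.Series(['beit_base_patch16_224.in22k_ft_in22k_in1k'])
s.str.split('.').str[0]   # -> 'beit_base_patch16_224'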
# url_results = f"https://github.com/huggingface/pytorch-image-models/raw/main/results/results-imagenet.csv"
# url_results = f"https://raw.githubusercontent.com/huggingface/pytorch-image-models/main/results/results-imagenet.csv"
# pd.read_csv(url_results)
def get_integrated_data(activity_option, result_option):
    df_results = get_results(result_option)
    url_benchmark = f"https://github.com/huggingface/pytorch-image-models/raw/main/results/benchmark-{activity_option}-amp-nhwc-pt112-cu113-rtx3090.csv"
    df_benchmark = pd.read_csv(url_benchmark)
    df_integrated = df_results.merge(df_benchmark, on='model')
    df_integrated['is_tensorflow_model'] = df_integrated.model.str.split('_').str[0] == 'tf'
    # Derive an architecture "family" from the model name: strip framework/legacy prefixes, then take the leading letters (plus an optional v2/v3).
    df_integrated['family'] = df_integrated.model.str.removeprefix("tf_").str.removeprefix("legacy_").str.removeprefix("nf_").str.extract(r'^([a-z]+?(?:v2|v3)?)(?:\d|_|$)')[0].values
    df_integrated.loc[df_integrated.model.str.contains('in22'), 'family'] = df_integrated.loc[df_integrated.model.str.contains('in22'), 'family'] + "_in22"
    df_integrated.loc[df_integrated.model.str.contains('resnet.*d'), 'family'] = df_integrated.loc[df_integrated.model.str.contains('resnet.*d'), 'family'] + "d"
    return df_integrated[~df_integrated.model.str.endswith('gn')] # Group-norm models. Why did Jeremy eliminate them from the analysis?
# df_integrated= get_integrated_data('train', 'original')
# models = L(df_integrated.model.values.tolist()); models
# families = L(df_integrated.family.values.tolist()); families.unique()
# df_integrated[df_integrated.model.str.endswith('gn')] # Group-norm models. Why did Jeremy eliminate them from the analysis?
# df_integrated[df_integrated.model.str.contains('rs')]
# df_integrated.model.str.lstrip("tf_").str.extract('^([a-z]+?(?:v2|v3)?)(?:\d|_|$0)')[0].values
# # "cspresnet50".removeprefix("legacy_")
# df_integrated= get_integrated_data('train', 'original')
# df_integrated
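For intuition about the family regex above, here is a quick self-contained check on a few hand-picked names (illustrative, not read from the CSVs). It also shows why the tf_ prefix must be removed first: the lazy match would otherwise stop at 'tf'.

names = pd.Series(['resnet50d', 'efficientnetv2_rw_m', 'swin_base_patch4_window7_224', 'tf_efficientnet_b0'])
names.str.extract(r'^([a-z]+?(?:v2|v3)?)(?:\d|_|$)')[0]
# -> resnet, efficientnetv2, swin, tf   (hence removeprefix("tf_") runs before the extract)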
@st.cache_data
def get_filtered_data(df_integrated, subs, is_fullmatch=False, drop_tf=True):
    if drop_tf: df_integrated = df_integrated[~df_integrated.is_tensorflow_model]
    if not subs: return df_integrated
    elif is_fullmatch: return df_integrated[df_integrated.family.str.fullmatch(subs)]
    else: return df_integrated[df_integrated.model.str.contains(subs)]
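To make the two filter modes concrete: with is_fullmatch=True the pattern must match the family column in full, otherwise it is treated as a substring/regex match on the raw model name. A small hand-built frame (rows are illustrative, not from the real data):

demo = pd.DataFrame({'model': ['resnet50d', 'tf_efficientnet_b0'],
                     'family': ['resnetd', 'efficientnet'],
                     'is_tensorflow_model': [False, True]})
get_filtered_data(demo, 'resnetd', is_fullmatch=True)                    # exact family match -> keeps resnet50d
get_filtered_data(demo, 'efficient', is_fullmatch=False, drop_tf=False)  # substring on model -> keeps tf_efficientnet_b0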
def get_data(col_option, activity_option, result_option, subs, is_fullmatch=False, drop_tf=True):
    col = "_".join([activity_option, col_option])   # e.g. 'infer_samples_per_sec'
    df_integrated = get_integrated_data(activity_option, result_option)
    df_integrated = get_filtered_data(df_integrated, subs, is_fullmatch=is_fullmatch, drop_tf=drop_tf)
    df_integrated['secs'] = 1./df_integrated[col]   # reciprocal of the chosen column; with samples_per_sec this is seconds per sample
    return df_integrated
def plot_selection(df, title, col_option, activity_option, w=1000, h=800):
    size_col = "_".join([activity_option, col_option])
    # Log-log scatter of speed vs. top-1 accuracy, coloured by family, with an OLS trendline (requires statsmodels).
    return px.scatter(df, width=w, height=h, size=df[size_col]**2, trendline="ols", trendline_options={'log_x':True},
                      title=title, x="secs", log_x=True, y='top1', log_y=True,
                      color="family", hover_name='model_org',
                      hover_data=[size_col])
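A quick standalone call of plot_selection on made-up numbers, just to show the expected column layout (secs, top1, family, model_org plus the sizing column); the real df comes from get_data below. Note the OLS trendline needs statsmodels at runtime, which is why it appears in requirements.txt further down.

demo = pd.DataFrame({
    'secs':   [0.001, 0.002, 0.004, 0.0015, 0.003, 0.006],
    'top1':   [78.0, 80.5, 82.0, 79.0, 81.0, 83.0],
    'family': ['resnet']*3 + ['convnext']*3,
    'model_org': list('abcdef'),
    'infer_samples_per_sec': [1000., 500., 250., 666., 333., 166.],
})
plot_selection(demo, "Demo", 'samples_per_sec', 'infer', w=700, h=500)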
result_options = ['original', 'real'] #result = 'real'
activity_options = ['train', 'infer']
col_options = ['samples_per_sec', 'step_time', 'batch_size', 'img_size', 'gmacs', 'macts']
subs = '^re[sg]netd?|beit|convnext|levit|efficient|vit|vgg|swin'
is_fullmatch = False
drop_tf = False
subs = 'levit|resnetd?|regnetx|vgg|convnext.*|efficientnetv2|beit|swin'   # overrides the looser pattern above
is_fullmatch = True
result_option = result_options[0]
activity_option = activity_options[1]
col_option = col_options[0]
size_col_option = col_options[3]
title_dict = dict(zip(activity_options, ['Training', "Inference"]))
df = get_data(col_option, activity_option, result_option, subs, is_fullmatch=is_fullmatch, drop_tf=drop_tf)
fig = plot_selection(df, title_dict[activity_option], size_col_option, activity_option)
from IPython.display import HTML
HTML(fig.to_html())
Streamlit App
sp = StreamlitPatcher()
sp.jupyter()
st.title("Which Image Model is best?")
col1, col2 = st.columns([1,3])
with col1:
    st.header("Settings")
    result_option = st.selectbox("Please choose dataset", result_options)
    activity_option = st.selectbox("Please choose activity", activity_options)
    subs = st.text_input("Subs", value='levit|resnetd?|regnetx|vgg|convnext.*|efficientnetv2|beit|swin')
    is_fullmatch = st.checkbox("Is fullmatch", value=True)
    drop_tf = st.checkbox("Drop Tensorflow Models", value=False)
    col_option = st.selectbox("Please choose col_option", col_options)
    size_col_option = st.selectbox("Please choose sizing col_option", col_options, index=3)

with col2:
    title_dict = dict(zip(activity_options, ['Training', "Inference"]))
    df = get_data(col_option, activity_option, result_option, subs, is_fullmatch=is_fullmatch, drop_tf=drop_tf)
    fig = plot_selection(df, None, size_col_option, activity_option, h=500, w=1000)
    # Plot!
    st.header(title_dict[activity_option])
    st.plotly_chart(fig, use_container_width=True, height=500)
Huggingface
%%writefile requirements.txt
huggingface_hub[fastai]
streamlit
plotly
pandas
statsmodels

Overwriting requirements.txt
import nbdev; nbdev.export.nb_export("03a_image_archs.ipynb", lib_path=".")
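A quick sanity check before creating the Space (a hedged sketch; it assumes the nbdev export above produced app.py in the current directory, which is the file uploaded further down, and that Streamlit is installed locally):

from pathlib import Path
assert Path("app.py").exists(), "nb_export did not produce app.py; check the #|default_exp / #|export directives in the notebook"
# The dashboard can then be previewed locally before pushing it to the Space:
#   streamlit run app.py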
from huggingface_hub import create_repo, HfApi, notebook_login
notebook_login()
space_repo = "rahuketu86/Space-ImageArchs"
create_repo(space_repo, repo_type='space', space_sdk='streamlit', exist_ok=True)
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[53], line 2
      1 space_repo = "rahuketu86/Space-ImageArchs"
----> 2 create_repo(space_repo, repo_type='space', space_sdk='streamlit', exist_ok=True)
    [... huggingface_hub internals: HfApi.create_repo -> _build_hf_headers -> build_hf_headers -> _validate_token_to_send ...]
ValueError: Token is required (write-access action) but no token found. You need to provide a token or be logged in to Hugging Face with `huggingface-cli login` or `huggingface_hub.login`. See https://huggingface.co/settings/tokens.
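The create_repo call fails because the kernel has no write-scoped token cached; the notebook_login() widget above was presumably not completed. One way to recover (a sketch; it assumes a write-access token from https://huggingface.co/settings/tokens is at hand):

from huggingface_hub import login
login()   # prompts for the token (or pass login(token="hf_...")); `huggingface-cli login` in a shell works too
create_repo(space_repo, repo_type='space', space_sdk='streamlit', exist_ok=True)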
files_to_upload = ['requirements.txt', 'app.py']; files_to_upload
['requirements.txt', 'app.py']
api = HfApi()
for fname in files_to_upload:
    api.upload_file(path_or_fileobj=fname, path_in_repo=fname, repo_id=space_repo, repo_type='space')
L(files_to_upload).map(lambda e: Path(e).unlink())   # remove the local copies once they are uploaded to the Space