From bbea7d8df55e15f8d060b4a27c386e0ca667c38b Mon Sep 17 00:00:00 2001
From: Bel LaPointe
Date: Mon, 8 Aug 2022 16:51:06 -0600
Subject: [PATCH] update

---
 .gitignore              |  3 +++
 download.py             | 28 ++++++++++++++++++++++++
 run.sh                  | 47 +++++++++++++++++++++++++++++++++++++++++
 tabtab.requirements.txt |  5 +++++
 4 files changed, 83 insertions(+)
 create mode 100644 download.py
 create mode 100644 run.sh
 create mode 100644 tabtab.requirements.txt

diff --git a/.gitignore b/.gitignore
index 78fd378..4665c6d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,4 @@
 **/*.sw*
+/tabtab
+/gpt2-large
+/models
diff --git a/download.py b/download.py
new file mode 100644
index 0000000..54e4bb6
--- /dev/null
+++ b/download.py
@@ -0,0 +1,28 @@
+import os
+import sys
+import requests
+from tqdm import tqdm
+
+if len(sys.argv) != 2:
+    print('You must enter the model name as a parameter, e.g.: download_model.py 124M')
+    sys.exit(1)
+
+model = sys.argv[1]
+
+subdir = os.path.join('models', model)
+if not os.path.exists(subdir):
+    os.makedirs(subdir)
+subdir = subdir.replace('\\','/') # needed for Windows
+
+for filename in ['checkpoint','encoder.json','hparams.json','model.ckpt.data-00000-of-00001', 'model.ckpt.index', 'model.ckpt.meta', 'vocab.bpe']:
+
+    r = requests.get("https://openaipublic.blob.core.windows.net/gpt-2/" + subdir + "/" + filename, stream=True)
+
+    with open(os.path.join(subdir, filename), 'wb') as f:
+        file_size = int(r.headers["content-length"])
+        chunk_size = 1000
+        with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
+            # 1k for chunk_size, since Ethernet packet size is around 1500 bytes
+            for chunk in r.iter_content(chunk_size=chunk_size):
+                f.write(chunk)
+                pbar.update(len(chunk))
diff --git a/run.sh b/run.sh
new file mode 100644
index 0000000..103c80e
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,47 @@
+#! /bin/bash
+
+set -e
+set -o pipefail
+
+if ! grep tabtab .gitignore; then
+    echo >> .gitignore
+    echo tabtab >> .gitignore
+fi
+
+image_ui=bel/tabtab:ui-$(date +%Y%m%d)
+if ! docker images | grep ${image_ui##*:}; then
+    if ! [ -d ./tabtab ]; then
+        git clone https://github.com/ainize-team/tabtab.git
+    fi
+    pushd tabtab
+    cp ../tabtab.requirements.txt ./requirements.txt
+    docker build -t $image_ui .
+    popd
+fi
+
+if ! [ -d ./models ]; then
+    mkdir ./models
+    python3 -c 'from tqdm import tqdm' &> /dev/null || pip3 install tqdm
+    python3 ./download.py 124M
+fi
+
+image_server=${image_ui//:ui/:server}
+if ! docker images | grep ${image_server##*:}; then
+    if ! [ -d ./gpt2-large ]; then
+        git clone https://github.com/Henriquepheak/gpt2-large.git
+    fi
+    pushd ./gpt2-large
+    docker build -t $image_server .
+    popd
+fi
+
+cleanup() {
+    docker rm -f tabtab-server tabtab-ui
+}
+trap cleanup EXIT
+
+localhost=$(ifconfig | grep -o '192.168[^ ]*' | head -n 1)
+docker run -p 12313:80 --rm -d --name tabtab-server $image_server
+docker run -p 12314:80 --rm -d --name tabtab-ui -e GPT2_SERVER_URL=http://$localhost:12313 $image_ui
+
+docker logs -f tabtab-ui
diff --git a/tabtab.requirements.txt b/tabtab.requirements.txt
new file mode 100644
index 0000000..f8edeb6
--- /dev/null
+++ b/tabtab.requirements.txt
@@ -0,0 +1,5 @@
+Flask==1.1.4
+requests==2.24.0
+transformers
+#Jinja2==3.0.3
+markupsafe==2.0.1
\ No newline at end of file