From 524d5a6c04b359a4d9998e42e6a2c89a47463904 Mon Sep 17 00:00:00 2001 From: derickson Date: Thu, 14 Dec 2023 14:15:34 -0500 Subject: [PATCH 1/4] changes for openai --- notebooks/Session_1.ipynb | 73 ++++++++++++++++++++------------------- notebooks/Session_2.ipynb | 3 +- streamlit/app.py | 16 +++++---- 3 files changed, 49 insertions(+), 43 deletions(-) diff --git a/notebooks/Session_1.ipynb b/notebooks/Session_1.ipynb index 9628c6d..2353fb2 100644 --- a/notebooks/Session_1.ipynb +++ b/notebooks/Session_1.ipynb @@ -146,11 +146,10 @@ "outputs": [], "source": [ "! pip install --upgrade pip\n", - "! pip install -qqq --no-cache-dir torch\n", - "! pip install -qqq transformers sentencepiece\n", - "! pip install -qqq xformers\n", - "! pip install -qqq python-dotenv\n", - "! pip install -qqq \"openai<1.0.0\" ## for later in the lab" + "! pip install -q --no-cache-dir torch\n", + "! pip install -q transformers sentencepiece\n", + "! pip install -q python-dotenv\n", + "! pip install -qqq tiktoken==0.5.2 cohere==4.38 openai==1.3.9 ## for later in the lab" ] }, { @@ -332,6 +331,9 @@ "metadata": {}, "outputs": [], "source": [ + "## Let's play with something a little bigger that can do a text completion\n", + "## This is a 3 GB download and takes some RAM to run, but it works CPU only\n", + "\n", "from transformers import pipeline\n", "from transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n", "\n", @@ -371,12 +373,15 @@ }, "outputs": [], "source": [ - "countries = [\"United Kingdom\",\n", - " \"France\",\n", - " \"People's Republic of China\",\n", - " \"United States\",\n", - " \"Ecuador\",\n", - " \"Faketopia\"]\n", + "countries = [\n", + " \"United Kingdom\",\n", + " \"France\",\n", + " \"People's Republic of China\",\n", + " \"United States\",\n", + " \"Ecuador\",\n", + " \"Freedonia\", ## high hallucination potential\n", + " \"Faketopia\" ## high hallucination potential\n", + " ]\n", "\n", "for country in countries:\n", " input_text = f\"The capital of the {country} is\"\n", @@ -408,7 +413,7 @@ }, "outputs": [], "source": [ - "prompt_text = \"The current Prime Minister of the united kingdom is \"\n", + "prompt_text = \"The current Prime Minister of the united kingdom is \" ## high stale data potential\n", "output = llm_pipe(prompt_text)\n", "completed_prompt = f\"\\033[94m{prompt_text}\\033[0m {output[0]['generated_text']}\"\n", "print(completed_prompt)" @@ -469,6 +474,7 @@ "source": [ "import os, secrets, requests\n", "import openai\n", + "from openai import OpenAI\n", "from requests.auth import HTTPBasicAuth\n", "\n", "#if using the Elastic AI proxy, then generate the correct API key\n", @@ -492,27 +498,22 @@ "\n", "# Call the OpenAI ChatCompletion API\n", "def chatCompletion(messages):\n", - " if os.environ[\"ELASTIC_PROXY\"] == \"True\":\n", - " completion = openai.ChatCompletion.create(\n", - " model=openai.default_model,\n", - " max_tokens=100,\n", - " messages=messages\n", - " )\n", - " else:\n", - " completion = openai.ChatCompletion.create(\n", - " engine=openai.default_model,\n", - " max_tokens=100,\n", - " messages=messages\n", - " )\n", + " client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)\n", + " completion = client.chat.completions.create(\n", + " model=openai.default_model,\n", + " max_tokens=100,\n", + " messages=messages\n", + " )\n", " return completion\n", "\n", "def chatWithGPT(prompt, print_full_json=False):\n", - " response_text = chatCompletion([{\"role\": \"user\", \"content\": prompt}])\n", + " completion = chatCompletion([{\"role\": \"user\", 
\"content\": prompt}])\n", + " response_text = completion.choices[0].message.content\n", "\n", " if print_full_json:\n", - " json_pretty(response_text)\n", + " print(completion.json())\n", "\n", - " return wrap_text(response_text.choices[0].message.content,70)\n", + " return wrap_text(response_text,70)\n", "\n", "## call it with the json debug output enabled\n", "response = chatWithGPT(\"Hello, is ChatGPT online and working?\", print_full_json=True)\n", @@ -584,18 +585,15 @@ "You are an unhelpful AI named Captain LLM_Beard that talks like a pirate in short responses.\n", "You acknowledge the user's question but redirect all conversations towards your love of treasure.\n", "\"\"\"\n", - "\n", - " response_text = chatCompletion(\n", - " [\n", + " completion = chatCompletion([\n", " {\"role\": \"system\", \"content\": system_prompt},\n", " {\"role\": \"user\", \"content\": prompt}\n", - " ]\n", - " )\n", - "\n", + " ])\n", + " response_text = completion.choices[0].message.content\n", " if print_full_json:\n", - " json_pretty(response_text)\n", + " print(completion.json())\n", "\n", - " return wrap_text(response_text.choices[0].message.content,70)\n", + " return wrap_text(response_text,70)\n", "\n", "hold_a_conversation(pirateGPT)" ] @@ -701,9 +699,12 @@ " concatenated_message = [system_prompt] + memory_buffer.peek()\n", "\n", " ## here is the request to the AI\n", - " completion = chatCompletion(concatenated_message)\n", "\n", + " completion = chatCompletion(concatenated_message)\n", " response_text = completion.choices[0].message.content\n", + " if print_full_json:\n", + " print(completion.json())\n", + "\n", "\n", " ## don't forget to add the repsonse to the conversation memory\n", " memory_buffer.enqueue({\"role\":\"assistant\", \"content\":response_text})\n", diff --git a/notebooks/Session_2.ipynb b/notebooks/Session_2.ipynb index 0b014f0..3167ef7 100644 --- a/notebooks/Session_2.ipynb +++ b/notebooks/Session_2.ipynb @@ -45,7 +45,8 @@ }, "outputs": [], "source": [ - "! pip install -q streamlit \"openai<1.0.0\" elasticsearch elastic-apm inquirer python-dotenv\n", + "! pip install -qqq tiktoken==0.5.2 cohere==4.38 openai==1.3.9\n", + "! 
+    "! pip install -q streamlit elasticsearch elastic-apm inquirer python-dotenv\n",
     "\n",
     "import os, inquirer, re, secrets, requests\n",
     "import streamlit as st\n",
diff --git a/streamlit/app.py b/streamlit/app.py
index 5c25456..2fbaf8c 100644
--- a/streamlit/app.py
+++ b/streamlit/app.py
@@ -1,10 +1,12 @@
 import os
 import streamlit as st
 import openai
+from openai import OpenAI
 from elasticsearch import Elasticsearch
 from string import Template
 import elasticapm
+
 # Configure OpenAI client
 openai.api_key = os.environ['OPENAI_API_KEY']
 openai.api_base = os.environ['OPENAI_API_BASE']
@@ -200,13 +202,15 @@ def chat_gpt(prompt, max_tokens=1024, max_context_tokens=4000, safety_margin=5,
     # Make the right OpenAI call depending on the API we're using
     if(os.environ["ELASTIC_PROXY"] == "True"):
-        response = openai.ChatCompletion.create(model=openai.default_model,
+        client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
+        response = client.chat.completions.create(model=openai.default_model,
                                                 temperature=0,
                                                 messages=[{"role": "system", "content": sys_content},
                                                           {"role": "user", "content": truncated_prompt}]
                                                 )
     else:
-        response = openai.ChatCompletion.create(engine=openai.default_model,
+        client = OpenAI(api_key=openai.api_key, base_url=openai.api_base)
+        response = client.chat.completions.create(model=openai.default_model,
                                                 temperature=0,
                                                 messages=[{"role": "system", "content": sys_content},
                                                           {"role": "user", "content": truncated_prompt}]
@@ -216,12 +220,12 @@ def chat_gpt(prompt, max_tokens=1024, max_context_tokens=4000, safety_margin=5,
     # APM: add metadata labels of data we want to capture
     elasticapm.label(model = openai.default_model)
     elasticapm.label(prompt = prompt)
-    elasticapm.label(total_tokens = response["usage"]["total_tokens"])
-    elasticapm.label(prompt_tokens = response["usage"]["prompt_tokens"])
-    elasticapm.label(response_tokens = response["usage"]["completion_tokens"])
+    elasticapm.label(total_tokens = response.usage.total_tokens)
+    elasticapm.label(prompt_tokens = response.usage.prompt_tokens)
+    elasticapm.label(response_tokens = response.usage.completion_tokens)
     if 'USER_HASH' in os.environ: elasticapm.label(user = os.environ['USER_HASH'])
-    return response["choices"][0]["message"]["content"]
+    return response.choices[0].message.content
 def toLLM(resp, url, usr_prompt, sys_prompt, neg_resp, show_prompt):
     prompt_template = Template(usr_prompt)
From e61d25a5ad70d0e380a31e560a50f1b45c4805aa Mon Sep 17 00:00:00 2001
From: derickson
Date: Thu, 14 Dec 2023 14:37:47 -0500
Subject: [PATCH 2/4] fixes, reduced cpu, requirements
---
 .devcontainer/devcontainer.json |   2 +-
 .devcontainer/requirements.txt  | 201 +++++++++++++++++++++++++++++---
 notebooks/Session_1.ipynb       |   4 +-
 3 files changed, 190 insertions(+), 17 deletions(-)
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index ad1c88a..9d182be 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -3,7 +3,7 @@
     "dockerfile": "Dockerfile"
   },
   "hostRequirements": {
-    "cpus": 4
+    "cpus": 2
   },
   "waitFor": "onCreateCommand",
   "postAttachCommand": {
diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt
index a9459a5..4be14d9 100644
--- a/.devcontainer/requirements.txt
+++ b/.devcontainer/requirements.txt
@@ -1,19 +1,192 @@
-# Default libraries required/wanted for Jupyter and Streamlit
+aiohttp==3.9.1
+aiosignal==1.3.1
+altair==5.2.0
+annotated-types==0.6.0
+anyio==4.0.0
+argon2-cffi==23.1.0
+argon2-cffi-bindings==21.2.0
+arrow==1.3.0
+asttokens==2.4.1
+async-lru==2.0.4
+async-timeout==4.0.3 +attrs==23.1.0 +Babel==2.13.1 +backoff==2.2.1 +beautifulsoup4==4.12.2 +bleach==6.1.0 +blessed==1.20.0 +blinker==1.7.0 +cachetools==5.3.2 +certifi==2023.7.22 +cffi==1.16.0 +charset-normalizer==3.3.2 +click==8.1.7 +cohere==4.38 +colorama==0.4.6 +comm==0.2.0 +contourpy==1.2.0 +cycler==0.12.1 +debugpy==1.8.0 +decorator==5.1.1 +defusedxml==0.7.1 +distro==1.8.0 +ecs-logging==2.1.0 +eland==8.11.0 +elastic-apm==6.19.0 +elastic-transport==8.11.0 +elasticsearch==8.11.0 +exceptiongroup==1.1.3 +executing==2.0.1 +fastavro==1.9.1 +fastjsonschema==2.19.0 +filelock==3.13.1 +fonttools==4.44.3 +fqdn==1.5.1 +frozenlist==1.4.0 +fsspec==2023.10.0 +gitdb==4.0.11 +GitPython==3.1.40 +h11==0.14.0 +httpcore==1.0.2 +httpx==0.25.2 +huggingface-hub==0.17.3 +idna==3.4 +importlib-metadata==6.11.0 +inquirer==3.1.3 +ipykernel==6.26.0 +ipython==8.17.2 ipywidgets==8.1.1 +isoduration==20.11.0 +jedi==0.19.1 +Jinja2==3.1.2 +joblib==1.3.2 +json5==0.9.14 +jsonpointer==2.4 +jsonschema==4.20.0 +jsonschema-specifications==2023.11.1 +jupyter-events==0.9.0 +jupyter-lsp==2.2.0 +jupyter-server-mathjax==0.2.6 +jupyter_client==8.6.0 +jupyter_core==5.5.0 +jupyter_server==2.10.1 +jupyter_server_terminals==0.4.4 +jupyterlab==4.0.8 +jupyterlab-pygments==0.2.2 +jupyterlab-widgets==3.0.9 +jupyterlab_git==0.44.0 +jupyterlab_server==2.25.1 +kiwisolver==1.4.5 +markdown-it-py==3.0.0 +MarkupSafe==2.1.3 matplotlib==3.8.1 -numpy==1.23.5 -pandas==1.5.3 -torch==1.12.1 +matplotlib-inline==0.1.6 +mdurl==0.1.2 +mistune==3.0.2 +mpmath==1.3.0 +multidict==6.0.4 +nbclient==0.9.0 +nbconvert==7.11.0 +nbdime==3.2.1 +nbformat==5.9.2 +nest-asyncio==1.5.8 +networkx==3.2.1 +nltk==3.8.1 +notebook_shim==0.2.3 +numpy==1.26.2 +nvidia-cublas-cu12==12.1.3.1 +nvidia-cuda-cupti-cu12==12.1.105 +nvidia-cuda-nvrtc-cu12==12.1.105 +nvidia-cuda-runtime-cu12==12.1.105 +nvidia-cudnn-cu12==8.9.2.26 +nvidia-cufft-cu12==11.0.2.54 +nvidia-curand-cu12==10.3.2.106 +nvidia-cusolver-cu12==11.4.5.107 +nvidia-cusparse-cu12==12.1.0.106 +nvidia-nccl-cu12==2.18.1 +nvidia-nvjitlink-cu12==12.3.101 +nvidia-nvtx-cu12==12.1.105 +openai==1.3.9 +overrides==7.4.0 +packaging==23.2 +pandas==2.1.3 +pandocfilters==1.5.0 +parso==0.8.3 +pexpect==4.8.0 +Pillow==10.1.0 +platformdirs==4.0.0 +plotly==5.18.0 +prometheus-client==0.18.0 +prompt-toolkit==3.0.41 +protobuf==4.25.1 +psutil==5.9.6 +ptyprocess==0.7.0 +pure-eval==0.2.2 +pyarrow==14.0.1 +pycparser==2.21 +pydantic==2.5.2 +pydantic_core==2.14.5 +pydeck==0.8.1b0 +Pygments==2.16.1 +pyparsing==3.1.1 +python-dateutil==2.8.2 +python-dotenv==1.0.0 +python-editor==1.0.4 +python-json-logger==2.0.7 +pytz==2023.3.post1 +PyYAML==6.0.1 +pyzmq==25.1.1 +readchar==4.0.5 +referencing==0.31.0 +regex==2023.10.3 +requests==2.31.0 +rfc3339-validator==0.1.4 +rfc3986-validator==0.1.1 +rich==13.7.0 +rpds-py==0.13.0 +safetensors==0.4.1 +scikit-learn==1.3.2 +scipy==1.11.3 +seaborn==0.13.0 +Send2Trash==1.8.2 +sentence-transformers==2.2.2 +sentencepiece==0.1.99 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.0 +soupsieve==2.5 +stack-data==0.6.3 +streamlit==1.28.1 +sympy==1.12 +tenacity==8.2.3 +terminado==0.18.0 +threadpoolctl==3.2.0 +tiktoken==0.5.2 +tinycss2==1.2.1 +tokenizers==0.14.1 +toml==0.10.2 +tomli==2.0.1 +toolz==0.12.0 +torch==2.1.1 torchvision==0.13.1 +tornado==6.3.3 tqdm==4.64.0 -streamlit==1.28.1 -#Add Custom worlshop packages below: -openai==0.28.1 -elasticsearch==8.11.0 -eland==8.11.0 +traitlets==5.13.0 transformers==4.35.0 -sentence_transformers==2.2.2 -python-dotenv==1.0.0 -elastic-apm==6.19.0 -inquirer==3.1.3 -sentencepiece==0.1.99 \ No newline at end 
of file +triton==2.1.0 +types-python-dateutil==2.8.19.14 +typing_extensions==4.8.0 +tzdata==2023.3 +tzlocal==5.2 +uri-template==1.3.0 +urllib3==2.0.7 +validators==0.22.0 +watchdog==3.0.0 +wcwidth==0.2.10 +webcolors==1.13 +webencodings==0.5.1 +websocket-client==1.6.4 +widgetsnbextension==4.0.9 +wrapt==1.14.1 +yarl==1.9.4 +zipp==3.17.0 diff --git a/notebooks/Session_1.ipynb b/notebooks/Session_1.ipynb index 2353fb2..0fb81b1 100644 --- a/notebooks/Session_1.ipynb +++ b/notebooks/Session_1.ipynb @@ -511,7 +511,7 @@ " response_text = completion.choices[0].message.content\n", "\n", " if print_full_json:\n", - " print(completion.json())\n", + " print(completion.model_dump_json())\n", "\n", " return wrap_text(response_text,70)\n", "\n", @@ -591,7 +591,7 @@ " ])\n", " response_text = completion.choices[0].message.content\n", " if print_full_json:\n", - " print(completion.json())\n", + " print(completion.model_dump_json())\n", "\n", " return wrap_text(response_text,70)\n", "\n", From 3105ec0791313eb4143b2b318d53a18f20becfff Mon Sep 17 00:00:00 2001 From: derickson Date: Thu, 14 Dec 2023 14:45:05 -0500 Subject: [PATCH 3/4] fix1 --- .devcontainer/requirements.txt | 200 +++------------------------------ 1 file changed, 14 insertions(+), 186 deletions(-) diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt index 4be14d9..0b025e6 100644 --- a/.devcontainer/requirements.txt +++ b/.devcontainer/requirements.txt @@ -1,192 +1,20 @@ -aiohttp==3.9.1 -aiosignal==1.3.1 -altair==5.2.0 -annotated-types==0.6.0 -anyio==4.0.0 -argon2-cffi==23.1.0 -argon2-cffi-bindings==21.2.0 -arrow==1.3.0 -asttokens==2.4.1 -async-lru==2.0.4 -async-timeout==4.0.3 -attrs==23.1.0 -Babel==2.13.1 -backoff==2.2.1 -beautifulsoup4==4.12.2 -bleach==6.1.0 -blessed==1.20.0 -blinker==1.7.0 -cachetools==5.3.2 -certifi==2023.7.22 -cffi==1.16.0 -charset-normalizer==3.3.2 -click==8.1.7 -cohere==4.38 -colorama==0.4.6 -comm==0.2.0 -contourpy==1.2.0 -cycler==0.12.1 -debugpy==1.8.0 -decorator==5.1.1 -defusedxml==0.7.1 -distro==1.8.0 -ecs-logging==2.1.0 -eland==8.11.0 -elastic-apm==6.19.0 -elastic-transport==8.11.0 -elasticsearch==8.11.0 -exceptiongroup==1.1.3 -executing==2.0.1 -fastavro==1.9.1 -fastjsonschema==2.19.0 -filelock==3.13.1 -fonttools==4.44.3 -fqdn==1.5.1 -frozenlist==1.4.0 -fsspec==2023.10.0 -gitdb==4.0.11 -GitPython==3.1.40 -h11==0.14.0 -httpcore==1.0.2 -httpx==0.25.2 -huggingface-hub==0.17.3 -idna==3.4 -importlib-metadata==6.11.0 -inquirer==3.1.3 -ipykernel==6.26.0 -ipython==8.17.2 ipywidgets==8.1.1 -isoduration==20.11.0 -jedi==0.19.1 -Jinja2==3.1.2 -joblib==1.3.2 -json5==0.9.14 -jsonpointer==2.4 -jsonschema==4.20.0 -jsonschema-specifications==2023.11.1 -jupyter-events==0.9.0 -jupyter-lsp==2.2.0 -jupyter-server-mathjax==0.2.6 -jupyter_client==8.6.0 -jupyter_core==5.5.0 -jupyter_server==2.10.1 -jupyter_server_terminals==0.4.4 -jupyterlab==4.0.8 -jupyterlab-pygments==0.2.2 -jupyterlab-widgets==3.0.9 -jupyterlab_git==0.44.0 -jupyterlab_server==2.25.1 -kiwisolver==1.4.5 -markdown-it-py==3.0.0 -MarkupSafe==2.1.3 matplotlib==3.8.1 -matplotlib-inline==0.1.6 -mdurl==0.1.2 -mistune==3.0.2 -mpmath==1.3.0 -multidict==6.0.4 -nbclient==0.9.0 -nbconvert==7.11.0 -nbdime==3.2.1 -nbformat==5.9.2 -nest-asyncio==1.5.8 -networkx==3.2.1 -nltk==3.8.1 -notebook_shim==0.2.3 -numpy==1.26.2 -nvidia-cublas-cu12==12.1.3.1 -nvidia-cuda-cupti-cu12==12.1.105 -nvidia-cuda-nvrtc-cu12==12.1.105 -nvidia-cuda-runtime-cu12==12.1.105 -nvidia-cudnn-cu12==8.9.2.26 -nvidia-cufft-cu12==11.0.2.54 -nvidia-curand-cu12==10.3.2.106 
-nvidia-cusolver-cu12==11.4.5.107
-nvidia-cusparse-cu12==12.1.0.106
-nvidia-nccl-cu12==2.18.1
-nvidia-nvjitlink-cu12==12.3.101
-nvidia-nvtx-cu12==12.1.105
+numpy==1.23.5
+pandas==1.5.3
+torch==1.12.1
+torchvision==0.13.1
+tqdm==4.64.0
+streamlit==1.28.1
+#Add Custom workshop packages below:
 openai==1.3.9
-overrides==7.4.0
-packaging==23.2
-pandas==2.1.3
-pandocfilters==1.5.0
-parso==0.8.3
-pexpect==4.8.0
-Pillow==10.1.0
-platformdirs==4.0.0
-plotly==5.18.0
-prometheus-client==0.18.0
-prompt-toolkit==3.0.41
-protobuf==4.25.1
-psutil==5.9.6
-ptyprocess==0.7.0
-pure-eval==0.2.2
-pyarrow==14.0.1
-pycparser==2.21
-pydantic==2.5.2
-pydantic_core==2.14.5
-pydeck==0.8.1b0
-Pygments==2.16.1
-pyparsing==3.1.1
-python-dateutil==2.8.2
+elasticsearch==8.11.0
+eland==8.11.0
+transformers==4.35.0
+sentence_transformers==2.2.2
 python-dotenv==1.0.0
-python-editor==1.0.4
-python-json-logger==2.0.7
-pytz==2023.3.post1
-PyYAML==6.0.1
-pyzmq==25.1.1
-readchar==4.0.5
-referencing==0.31.0
-regex==2023.10.3
-requests==2.31.0
-rfc3339-validator==0.1.4
-rfc3986-validator==0.1.1
-rich==13.7.0
-rpds-py==0.13.0
-safetensors==0.4.1
-scikit-learn==1.3.2
-scipy==1.11.3
-seaborn==0.13.0
-Send2Trash==1.8.2
-sentence-transformers==2.2.2
+elastic-apm==6.19.0
+inquirer==3.1.3
 sentencepiece==0.1.99
-six==1.16.0
-smmap==5.0.1
-sniffio==1.3.0
-soupsieve==2.5
-stack-data==0.6.3
-streamlit==1.28.1
-sympy==1.12
-tenacity==8.2.3
-terminado==0.18.0
-threadpoolctl==3.2.0
 tiktoken==0.5.2
-tinycss2==1.2.1
-tokenizers==0.14.1
-toml==0.10.2
-tomli==2.0.1
-toolz==0.12.0
-torch==2.1.1
-torchvision==0.13.1
-tornado==6.3.3
-tqdm==4.64.0
-traitlets==5.13.0
-transformers==4.35.0
-triton==2.1.0
-types-python-dateutil==2.8.19.14
-typing_extensions==4.8.0
-tzdata==2023.3
-tzlocal==5.2
-uri-template==1.3.0
-urllib3==2.0.7
-validators==0.22.0
-watchdog==3.0.0
-wcwidth==0.2.10
-webcolors==1.13
-webencodings==0.5.1
-websocket-client==1.6.4
-widgetsnbextension==4.0.9
-wrapt==1.14.1
-yarl==1.9.4
-zipp==3.17.0
+cohere==4.38
\ No newline at end of file
From 805669d75b2bbfc27f356d669d1ec3b9fe259786 Mon Sep 17 00:00:00 2001
From: derickson
Date: Thu, 14 Dec 2023 14:56:09 -0500
Subject: [PATCH 4/4] need 4 cpus
---
 .devcontainer/devcontainer.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 9d182be..ad1c88a 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -3,7 +3,7 @@
     "dockerfile": "Dockerfile"
   },
   "hostRequirements": {
-    "cpus": 4
+    "cpus": 4
  },
  "waitFor": "onCreateCommand",
  "postAttachCommand": {
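A note on the pattern these four patches converge on: the openai>=1.0 SDK replaces the module-level openai.ChatCompletion.create(...) call with a client object, accepts model= only (the old engine= keyword is gone), and returns pydantic objects instead of dicts. The sketch below is a minimal illustration under the same assumptions the notebooks make (OPENAI_API_KEY and OPENAI_API_BASE set in the environment); the model name is a placeholder, and openai.default_model used in the patches is a workshop convention, not an SDK attribute.

import os
from openai import OpenAI

# One client object replaces the old module-level call surface.
client = OpenAI(
    api_key=os.environ["OPENAI_API_KEY"],    # same env vars the notebooks read
    base_url=os.environ["OPENAI_API_BASE"],  # proxy or alternate endpoint, if any
)

completion = client.chat.completions.create(
    model="gpt-3.5-turbo",  # placeholder model name; v1 takes model=, not engine=
    max_tokens=100,
    messages=[{"role": "user", "content": "Hello, is the API reachable?"}],
)

# Responses are pydantic models: attribute access, not dict subscripts.
print(completion.choices[0].message.content)
print(completion.usage.total_tokens)
print(completion.model_dump_json())  # .json() is deprecated under pydantic v2

Constructing the client once at startup, rather than inside each request handler as the streamlit patch does, avoids rebuilding it on every call; both approaches work, since the client is cheap to create.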