Skip to content

Commit 1001c38

Browse files
authored
Merge pull request chenfei-wu#378 from qfyin/azure
Azure OpenAI service support
2 parents 67c6c47 + b957727 commit 1001c38

File tree

4 files changed

+60
-13
lines changed

4 files changed

+60
-13
lines changed

LowCodeLLM/README.md

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,8 @@ https://user-images.githubusercontent.com/43716920/233937121-cd057f04-dec8-45b8-
1313

1414
## Quick Start
1515
Please note that due to time constraints, the code we provide is only the minimum viable version of the low-code LLM interactive code, i.e. only demonstrating the core concept of Low-code LLM human-LLM interaction. We welcome anyone who is interested in improving our front-end interface.
16+
Currently, both the `OpenAI API` and `Azure OpenAI Service` are supported. You will need to provide the required credentials and settings to invoke these APIs.
17+
1618
```
1719
# clone the repo
1820
git clone https://github.com/microsoft/TaskMatrix.git
@@ -22,10 +24,16 @@ cd LowCodeLLM
2224
2325
# build and run docker
2426
docker build -t lowcode:latest .
27+
28+
# If the OpenAI API is being used, only the API key needs to be provided.
2529
docker run -p 8888:8888 --env OPENAIKEY={Your_Private_Openai_Key} lowcode:latest
2630
27-
# Open the webpage (./src/index.html)
31+
# When using Azure OpenAI Service, it is advisable to store the necessary information in a configuration file for ease of access.
32+
# Duplicate the config.template file, rename the copy to config.ini, and then fill in the required information in config.ini.
33+
docker run -p 8888:8888 --env-file config.ini lowcode:latest
2834
```
35+
You can now try it by visiting [Demo page](http://localhost:8888/)
36+
2937

3038
## System Overview
3139

LowCodeLLM/config.template

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
USE_AZURE=True
2+
OPENAIKEY=your-azure-openai-service-key
3+
API_BASE=your-base-url-for-azure
4+
API_VERSION=2023-03-15-preview
5+
MODEL=your-gpt-deployment-name

LowCodeLLM/src/app.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
import os
2-
from flask import Flask, request
2+
from flask import Flask, request, send_from_directory
33
from flask_cors import CORS, cross_origin
44
from lowCodeLLM import lowCodeLLM
55
from flask.logging import default_handler
66
import logging
77

8-
app = Flask('lowcode-llm', static_url_path='', template_folder='')
8+
app = Flask('lowcode-llm', static_folder='', template_folder='')
99
app.debug = True
1010
llm = lowCodeLLM()
1111
gunicorn_logger = logging.getLogger('gunicorn.error')
@@ -14,6 +14,10 @@
1414
'%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
1515
default_handler.setFormatter(logging_format)
1616

17+
@app.route("/")
18+
def index():
19+
return send_from_directory(".", "index.html")
20+
1721
@app.route('/api/get_workflow', methods=['POST'])
1822
@cross_origin()
1923
def get_workflow():

LowCodeLLM/src/openAIWrapper.py

Lines changed: 40 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,28 @@
44
class OpenAIWrapper:
55
def __init__(self, temperature):
66
self.key = os.environ.get("OPENAIKEY")
7-
self.chat_model_id = "gpt-3.5-turbo"
7+
openai.api_key = self.key
8+
9+
# Access the USE_AZURE environment variable
10+
self.use_azure = os.environ.get('USE_AZURE')
11+
12+
# Check if USE_AZURE is defined
13+
if self.use_azure is not None:
14+
# Convert the USE_AZURE value to boolean
15+
self.use_azure = self.use_azure.lower() == 'true'
16+
else:
17+
self.use_azure = False
18+
19+
if self.use_azure:
20+
openai.api_type = "azure"
21+
self.api_base = os.environ.get('API_BASE')
22+
openai.api_base = self.api_base
23+
self.api_version = os.environ.get('API_VERSION')
24+
openai.api_version = self.api_version
25+
self.engine = os.environ.get('MODEL')
26+
else:
27+
self.chat_model_id = "gpt-3.5-turbo"
28+
829
self.temperature = temperature
930
self.max_tokens = 2048
1031
self.top_p = 1
@@ -15,15 +36,24 @@ def run(self, prompt):
1536

1637
def _post_request_chat(self, messages):
1738
try:
18-
openai.api_key = self.key
19-
response = openai.ChatCompletion.create(
20-
model=self.chat_model_id,
21-
messages=messages,
22-
temperature=self.temperature,
23-
max_tokens=self.max_tokens,
24-
frequency_penalty=0,
25-
presence_penalty=0
26-
)
39+
if self.use_azure:
40+
response = openai.ChatCompletion.create(
41+
engine=self.engine,
42+
messages=messages,
43+
temperature=self.temperature,
44+
max_tokens=self.max_tokens,
45+
frequency_penalty=0,
46+
presence_penalty=0
47+
)
48+
else:
49+
response = openai.ChatCompletion.create(
50+
model=self.chat_model_id,
51+
messages=messages,
52+
temperature=self.temperature,
53+
max_tokens=self.max_tokens,
54+
frequency_penalty=0,
55+
presence_penalty=0
56+
)
2757
res = response['choices'][0]['message']['content']
2858
return res, True
2959
except Exception as e:

0 commit comments

Comments
 (0)