diff --git a/src/api_demo.py b/src/api_demo.py
index 0ff4663..e31c8b9 100644
--- a/src/api_demo.py
+++ b/src/api_demo.py
@@ -42,7 +42,7 @@ app = FastAPI()
 
 @app.post("/")
 async def create_item(request: Request):
-    global model, tokenizer, prompt_template, generating_args
+    global model, tokenizer, prompt_template, source_prefix, generating_args
 
     # Parse the request JSON
     json_post_raw = await request.json()
@@ -55,7 +55,7 @@ async def create_item(request: Request):
     temperature = json_post_list.get("temperature", None)
 
     # Tokenize the input prompt
-    input_ids = tokenizer([prompt_template.get_prompt(prompt, history)], return_tensors="pt")["input_ids"]
+    input_ids = tokenizer([prompt_template.get_prompt(prompt, history, source_prefix)], return_tensors="pt")["input_ids"]
     input_ids = input_ids.to(model.device)
 
     # Generation arguments
@@ -94,8 +94,11 @@ async def create_item(request: Request):
 
 
 if __name__ == "__main__":
+
     model_args, data_args, finetuning_args, generating_args = prepare_infer_args()
     model, tokenizer = load_pretrained(model_args, finetuning_args)
+
     prompt_template = Template(data_args.prompt_template)
+    source_prefix = data_args.source_prefix if data_args.source_prefix else ""
 
     uvicorn.run(app, host='0.0.0.0', port=8000, workers=1)
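
For reference, a minimal client for the endpoint patched above. The "prompt", "history", and "temperature" keys mirror the json_post_list.get(...) lookups visible in the hunks, and the port matches the uvicorn.run call; the shape of the history entries and of the response JSON is not shown in this diff, so those parts are assumptions.

    import requests

    # Hypothetical client for the demo server above. The request keys follow
    # the json_post_list.get(...) lookups in create_item; everything else
    # (history entry shape, response schema) is assumed, not taken from the diff.
    payload = {
        "prompt": "Hello, who are you?",
        "history": [],        # prior (query, response) pairs, assumed format
        "temperature": 0.7,   # optional; defaults to None server-side
    }
    resp = requests.post("http://127.0.0.1:8000/", json=payload)
    print(resp.json())
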
diff --git a/src/cli_demo.py b/src/cli_demo.py
index 7dd92d4..2a501f0 100644
--- a/src/cli_demo.py
+++ b/src/cli_demo.py
@@ -20,9 +20,10 @@ def main():
 
     model_name = "BLOOM" if "bloom" in model_args.model_name_or_path else "LLaMA"
     prompt_template = Template(data_args.prompt_template)
+    source_prefix = data_args.source_prefix if data_args.source_prefix else ""
 
     def predict_and_print(query, history: list) -> list:
-        input_ids = tokenizer([prompt_template.get_prompt(query, history)], return_tensors="pt")["input_ids"]
+        input_ids = tokenizer([prompt_template.get_prompt(query, history, source_prefix)], return_tensors="pt")["input_ids"]
         input_ids = input_ids.to(model.device)
 
         streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
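
All three call sites now pass source_prefix as a third positional argument to Template.get_prompt, so the Template class (not part of this diff) must accept it. A minimal sketch of a compatible signature, assuming the prefix is simply prepended before the replayed history; the actual turn formatting lives in the project's own Template implementation:

    # Sketch only: the real Template class is outside this diff, so the
    # formatting below is an assumption, not the project's actual code.
    class Template:

        def __init__(self, name: str):
            self.name = name

        def get_prompt(self, query: str, history: list = None, prefix: str = "") -> str:
            history = history or []
            text = prefix + "\n" if prefix else ""  # optional source prefix first
            for old_query, old_response in history:  # replay prior turns
                text += "Human: {}\nAssistant: {}\n".format(old_query, old_response)
            text += "Human: {}\nAssistant: ".format(query)  # current turn
            return text
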
diff --git a/src/web_demo.py b/src/web_demo.py
index ba886ab..d081a9e 100644
--- a/src/web_demo.py
+++ b/src/web_demo.py
@@ -25,6 +25,7 @@ model_args, data_args, finetuning_args, generating_args = prepare_infer_args()
 model, tokenizer = load_pretrained(model_args, finetuning_args)
 
 prompt_template = Template(data_args.prompt_template)
+source_prefix = data_args.source_prefix if data_args.source_prefix else ""
 
 
 def postprocess(self, y):
@@ -79,7 +80,7 @@ def parse_text(text): # copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT
 def predict(query, chatbot, max_length, top_p, temperature, history):
     chatbot.append((parse_text(query), ""))
 
-    input_ids = tokenizer([prompt_template.get_prompt(query, history)], return_tensors="pt")["input_ids"]
+    input_ids = tokenizer([prompt_template.get_prompt(query, history, source_prefix)], return_tensors="pt")["input_ids"]
     input_ids = input_ids.to(model.device)
 
     streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
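
Both cli_demo.py and web_demo.py drive generation through TextIteratorStreamer, as the context lines above show. For readers unfamiliar with the pattern, a self-contained sketch using a stand-in checkpoint (gpt2 here is arbitrary, not a model this project targets): generate() blocks until finished, so it runs on a worker thread while the caller iterates over decoded text chunks.

    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in checkpoint
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    input_ids = tokenizer(["Hello, world"], return_tensors="pt")["input_ids"]
    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks until done, so it runs in a background thread while
    # the main thread consumes decoded text chunks from the streamer.
    thread = Thread(target=model.generate, kwargs={
        "input_ids": input_ids,
        "max_new_tokens": 32,
        "streamer": streamer,
    })
    thread.start()
    for new_text in streamer:
        print(new_text, end="", flush=True)
    thread.join()
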