atharvapawar committed on
Commit a4eb73a · verified · 1 Parent(s): 6e3972e

Upload 4 files

Files changed (4)
  1. app.py +77 -0
  2. blogGen.py +39 -0
  3. details.txt +13 -0
  4. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,77 @@
+ # download model from here : https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
+ # model file name : llama-2-7b-chat.ggmlv3.q8_0.bin
+ # Tut Ref : https://www.youtube.com/watch?v=_kYJZP1G9mE&t=754s
+ # Github Link : https://github.com/SharathRaju489/Email-Generator-App-Langchain-LLAMA2-LLM/tree/main
+
+ # Credit : https://github.com/SharathRaju489
+
+ import streamlit as st
+ from langchain.prompts import PromptTemplate
+ from langchain.llms import CTransformers
+
+ # Function to get the response back
+ def getLLMResponse(form_input, email_sender, email_recipient, email_style):
+     # llm = OpenAI(temperature=.9, model="text-davinci-003")
+
+     # Wrapper for Llama-2-7B-Chat, running Llama 2 on CPU.
+
+     # Quantization reduces model precision by converting weights from 16-bit floats
+     # to 8-bit integers, enabling efficient deployment on resource-limited devices
+     # and shrinking model size while largely maintaining performance.
+
+     # C Transformers is a Python library that provides bindings for transformer
+     # models implemented in C/C++ using the GGML library. It supports various
+     # open-source models, among them Llama, GPT4All-J, MPT, and Falcon.
+
+     llm = CTransformers(model='model/llama-2-7b-chat.ggmlv3.q8_0.bin',  # https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
+                         model_type='llama',
+                         config={'max_new_tokens': 256,
+                                 'temperature': 0.01})
+
+     # Template for building the PROMPT
+     template = """
+     Write an email in a {style} style on the topic: {email_topic}.\n\nSender: {sender}\nRecipient: {recipient}
+     \n\nEmail Text:
+     """
+
+     # Creating the final PROMPT
+     prompt = PromptTemplate(
+         input_variables=["style", "email_topic", "sender", "recipient"],
+         template=template,
+     )
+
+     # Generating the response using the LLM
+     response = llm(prompt.format(email_topic=form_input, sender=email_sender, recipient=email_recipient, style=email_style))
+     print(response)
+
+     return response
+
+
+ st.set_page_config(page_title="Generate Emails",
+                    page_icon='📧',
+                    layout='centered',
+                    initial_sidebar_state='collapsed')
+ st.header("Generate Emails 📧")
+
+ form_input = st.text_area('Enter the email topic', height=275)
+
+ # Creating columns for the UI - to receive inputs from the user
+ col1, col2, col3 = st.columns([10, 10, 5])
+ with col1:
+     email_sender = st.text_input('Sender Name')
+ with col2:
+     email_recipient = st.text_input('Recipient Name')
+ with col3:
+     email_style = st.selectbox('Writing Style',
+                                ('Formal', 'Appreciating', 'Not Satisfied', 'Neutral'),
+                                index=0)
+
+ submit = st.button("Generate")
+
+ # When the 'Generate' button is clicked, execute the code below
+ if submit:
+     st.write(getLLMResponse(form_input, email_sender, email_recipient, email_style))
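
A quick way to sanity-check the prompt construction above without launching Streamlit is to format the template by hand. This is a minimal sketch, assuming only that langchain is installed; the sample values are hypothetical:

    from langchain.prompts import PromptTemplate

    template = ("Write an email in a {style} style on the topic: {email_topic}."
                "\n\nSender: {sender}\nRecipient: {recipient}\n\nEmail Text:")
    prompt = PromptTemplate(
        input_variables=["style", "email_topic", "sender", "recipient"],
        template=template,
    )
    # Prints the exact string that would be passed to the CTransformers LLM.
    print(prompt.format(style="Formal", email_topic="project deadline extension",
                        sender="Alice", recipient="Bob"))

The app itself is started with streamlit run app.py once the GGML model file is placed under model/.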
blogGen.py ADDED
@@ -0,0 +1,40 @@
+ import os
+ from langchain.prompts import PromptTemplate
+ from langchain.llms import CTransformers
+
+ model_path = 'model/llama-2-7b-chat.ggmlv3.q8_0.bin'
+
+ def get_llama_response(input_text, no_words, blog_style):
+     # Ensure the model file exists
+     if not os.path.exists(model_path):
+         print(f"Error: Model file '{model_path}' not found.")
+         return None
+
+     # Initialize the LLM
+     try:
+         llm = CTransformers(model=model_path, model_type='llama', config={'max_new_tokens': 256, 'temperature': 0.01})
+     except RuntimeError as e:
+         print(f"Error: Could not create model. Error: {e}")
+         return None
+
+     print("Model file found & loaded...")
+
+     # Prepare the prompt template
+     template = f"""Write a blog for a {blog_style} job profile on the topic '{input_text}' within {no_words} words."""
+
+     # Generate the response from the Llama 2 model
+     try:
+         response = llm(template)
+         return response
+     except Exception as e:
+         print(f"Error: Could not generate response. Error: {e}")
+         return None
+
+ # Example usage (guarded so importing this module does not trigger a model run)
+ if __name__ == '__main__':
+     input_text = "large language model"
+     no_words = 50
+     blog_style = 'Researchers'
+
+     model_response = get_llama_response(input_text, no_words, blog_style)
+     print(model_response)
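
With the example usage guarded by the __main__ check, the helper can also be imported and reused. A minimal sketch with hypothetical values, assuming the GGML model file exists under model/:

    from blogGen import get_llama_response

    # Generate the same topic for several audiences.
    for style in ("Researchers", "Data Scientists", "Common People"):
        post = get_llama_response("large language model", no_words=50, blog_style=style)
        print(style, "->", post)

Running python blogGen.py directly still executes the single example at the bottom of the file.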
details.txt ADDED
@@ -0,0 +1,13 @@
+ pip install sentence-transformers
+ pip install uvicorn
+ pip install ctransformers
+ pip install langchain
+ pip install python-box
+ # pip install streamlit
+
+ # Or install everything at once:
+ # pip install -r requirements.txt
+
+ # In a notebook, upgrade with:
+ # !pip install --upgrade ctransformers langchain
+
+ pip show sentence-transformers uvicorn ctransformers langchain python-box
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ sentence-transformers
+ uvicorn
+ ctransformers
+ fastapi
+ ipykernel
+ langchain
+ python-box
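
One gap worth flagging: app.py imports streamlit, which is absent from this list (it appears only commented out in details.txt), possibly because the hosting environment already provides it. A minimal local setup, assuming streamlit must be installed manually:

    pip install -r requirements.txt
    pip install streamlit        # not pinned above; needed to run app.py locally
    streamlit run app.py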