import os
import logging
from flask import Flask, render_template, request, jsonify
import g4f
from g4f.client import Client

# Configure logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Create Flask app
app = Flask(__name__)
app.secret_key = os.environ.get("SESSION_SECRET", "default_secret_key")

# Initialize g4f client
client = Client()


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/api/chat', methods=['POST'])
def chat():
    try:
        data = request.get_json(silent=True) or {}
        messages = data.get('messages', [])
        model = data.get('model', 'gpt-4o-mini')

        # Add system prompt
        system_prompt = {
            "role": "system",
            "content": (
                "You are Orion, a helpful AI assistant. You provide accurate, "
                "informative, and friendly responses while keeping them concise "
                "and relevant. You were made by Abdullah Ali, who is 13 years old."
            )
        }

        # Insert system prompt at the beginning if not already present
        if not messages or messages[0].get('role') != 'system':
            messages.insert(0, system_prompt)

        logger.debug(
            f"Sending request to g4f with model: {model} and messages: {messages}"
        )

        # Call the g4f API
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            web_search=False
        )

        ai_response = response.choices[0].message.content
        logger.debug(f"Received response from g4f: {ai_response}")

        return jsonify({'status': 'success', 'message': ai_response})
    except Exception as e:
        logger.error(f"Error in chat endpoint: {str(e)}")
        return jsonify({
            'status': 'error',
            'message': f"An error occurred: {str(e)}"
        }), 500



@app.route('/api/conversations/<conversation_id>', methods=['DELETE'])
def delete_conversation(conversation_id):
    try:
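        # Conversations are kept client-side only in this app; there is no
        # server-side storage to clean up, so just acknowledge the request.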
        return jsonify({'status': 'success', 'message': f'Conversation {conversation_id} deleted'})
    except Exception as e:
        logger.error(f"Error deleting conversation: {str(e)}")
        return jsonify({
            'status': 'error',
            'message': f"An error occurred: {str(e)}"
        }), 500

@app.route('/api/models', methods=['GET'])
def get_models():
    try:
        # Return a list of available models
        # You can customize this list based on what g4f supports
        models = [
            {"id": "gpt-4o-mini", "name": "GPT-4o Mini"}
        ]
        return jsonify({'status': 'success', 'models': models})
    except Exception as e:
        logger.error(f"Error in models endpoint: {str(e)}")
        return jsonify({
            'status': 'error',
            'message': f"An error occurred: {str(e)}"
        }), 500


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True)
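
# A minimal usage sketch of the /api/chat endpoint (assumes the server is
# running locally on port 5000 and that the `requests` package is installed;
# it is not a dependency of this app):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:5000/api/chat",
#       json={
#           "model": "gpt-4o-mini",
#           "messages": [{"role": "user", "content": "Hello"}],
#       },
#   )
#   print(resp.json()["message"])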