mihalykiss committed on
Commit 373c3ad · verified · 1 Parent(s): be98a83

Update app.py

Files changed (1):
  1. app.py +225 -81
app.py CHANGED
@@ -1,6 +1,92 @@
  import gradio as gr

- title = "AI Text Detector"

  description = """
  <div class="app-description">
@@ -10,25 +96,13 @@ description = """
  <li><span class="icon">🔍</span> <strong>Model Detection:</strong> Capable of identifying content from over 40 AI models.</li>
  <li><span class="icon">📈</span> <strong>Accuracy:</strong> Performs optimally with more extensive text inputs.</li>
  <li><span class="icon">📄</span> <strong>Read more:</strong> Our methodology is detailed in our research paper:
- <a href="https://aclanthology.org/2025.genaidetect-1.15/" target="_blank" class="learn-more-link"><b>LINK</b></a>.
  </li>
  </ul>
  <p class="instruction-text">Paste your text into the field below to analyze its origin.</p>
  </div>
  """
- bottom_text = "<p class='footer-text'>Developed by <strong>SzegedAI</strong></p>"
-
- # Placeholder for the actual classification function
- def classify_text(text):
-     if not text.strip():
-         return "<div style='text-align: center; color: #7f8c8d;'>Please enter some text to analyze.</div>"
-     # Simulate model output
-     if "bert" in text.lower() or "sentence" in text.lower() or "token" in text.lower():
-         return "<div class='highlight-human'>Likely Human-Written</div><p style='font-size:0.9em; color: #7f8c8d; margin-top: 5px;'>Based on linguistic patterns.</p>"
-     elif len(text) > 50 : # Simple heuristic for demo
-         return "<div class='highlight-ai'>Likely AI-Generated</div><p style='font-size:0.9em; color: #7f8c8d; margin-top: 5px;'>Based on predictive analysis.</p>"
-     else:
-         return "<div class='highlight-ai'>Potentially AI-Generated</div><p style='font-size:0.9em; color: #7f8c8d; margin-top: 5px;'>Analysis suggests non-human origin, but more text may improve accuracy.</p>"

  AI_texts = [
  "Camels are remarkable desert animals known for their unique adaptations to harsh, arid environments. Native to the Middle East, North Africa, and parts of Asia, camels have been essential to human life for centuries, serving as a mode of transportation, a source of food, and even a symbol of endurance and survival. There are two primary species of camels: the dromedary camel, which has a single hump and is commonly found in the Middle East and North Africa, and the Bactrian camel, which has two humps and is native to Central Asia. Their humps store fat, not water, as commonly believed, allowing them to survive long periods without food by metabolizing the stored fat for energy. Camels are highly adapted to desert life. They can go for weeks without water, and when they do drink, they can consume up to 40 gallons in one sitting. Their thick eyelashes, sealable nostrils, and wide, padded feet protect them from sand and help them walk easily on loose desert terrain.",
@@ -38,38 +112,96 @@ Human_texts = [
  "To make BERT handle a variety of down-stream tasks, our input representation is able to unambiguously represent both a single sentence and a pair of sentences (e.g., h Question, Answeri) in one token sequence. Throughout this work, a “sentence” can be an arbitrary span of contiguous text, rather than an actual linguistic sentence. A “sequence” refers to the input token sequence to BERT, which may be a single sentence or two sentences packed together. We use WordPiece embeddings (Wu et al., 2016) with a 30,000 token vocabulary. The first token of every sequence is always a special classification token ([CLS]). The final hidden state corresponding to this token is used as the aggregate sequence representation for classification tasks. Sentence pairs are packed together into a single sequence."
  ]

  modern_css = """
  @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');

  :root {
- --primary-bg: #F8F9FA; /* Light Gray for page body */
- --app-bg: #FFFFFF; /* White for app container */
- --text-primary: #2C3E50; /* Dark Slate Blue */
- --text-secondary: #7F8C8D; /* Cool Gray */
- --accent-color: #1ABC9C; /* Teal */
- --accent-color-darker: #16A085; /* Darker Teal for hover */
- --border-color: #E0E0E0; /* Light Gray for borders */
- --input-bg: #FFFFFF; /* White for input fields */
  --input-focus-border: var(--accent-color);
  --human-color: #2ECC71; /* Green */
  --human-bg: rgba(46, 204, 113, 0.1);
  --ai-color: #E74C3C; /* Red */
  --ai-bg: rgba(231, 76, 60, 0.1);
- --shadow-color: rgba(44, 62, 80, 0.1); /* Subtle shadow */
- --container-max-width: 700px;
  --border-radius-md: 8px;
  --border-radius-lg: 12px;
  }

  body {
  font-family: 'Inter', sans-serif;
- background: linear-gradient(135deg, #f5f7fa 0%, #eef2f7 100%); /* Subtle gradient background */
  color: var(--text-primary);
  margin: 0;
  padding: 20px;
  display: flex;
  justify-content: center;
- align-items: flex-start; /* Align to top for scroll */
  min-height: 100vh;
  box-sizing: border-box;
  overflow-y: auto;
@@ -78,28 +210,28 @@ body {
  .gradio-container {
  background-color: var(--app-bg);
  border-radius: var(--border-radius-lg);
- padding: clamp(20px, 5vw, 40px); /* Responsive padding */
  box-shadow: 0 8px 25px var(--shadow-color);
  max-width: var(--container-max-width);
  width: 100%;
- margin: 20px auto; /* Centering with margin for scroll */
- border: none; /* Remove default Gradio border */
  }

- /* Hide default Gradio Form styling that might interfere */
- .form.svelte-633qhp, .block.svelte-11xb1hd {
  background: none !important;
  border: none !important;
  box-shadow: none !important;
- padding: 0 !important; /* Reset padding if it causes issues */
  }

- h1 { /* Targets the main title */
  color: var(--text-primary);
- font-size: clamp(24px, 5vw, 32px); /* Responsive font size */
  font-weight: 700;
  text-align: center;
- margin-bottom: 25px;
  letter-spacing: -0.5px;
  }

@@ -148,7 +280,7 @@ h1 { /* Targets the main title */
  text-decoration: underline;
  }

- #text_input_box textarea { /* Targeting the textarea inside the Gradio Textbox component */
  background-color: var(--input-bg);
  border: 1px solid var(--border-color);
  border-radius: var(--border-radius-md);
@@ -158,12 +290,12 @@ h1 { /* Targets the main title */
  box-sizing: border-box;
  color: var(--text-primary);
  transition: border-color 0.3s ease, box-shadow 0.3s ease;
- min-height: 120px; /* Ensure enough space for typing */
  box-shadow: 0 2px 4px rgba(0,0,0,0.05);
  }

  #text_input_box textarea::placeholder {
- color: #B0BEC5; /* Lighter placeholder text */
  }

  #text_input_box textarea:focus {
@@ -172,8 +304,8 @@ h1 { /* Targets the main title */
  outline: none;
  }

- #result_output_box { /* Style the Markdown block for results */
- background-color: var(--input-bg);
  border: 1px solid var(--border-color);
  border-radius: var(--border-radius-md);
  padding: 20px;
@@ -181,33 +313,43 @@ h1 { /* Targets the main title */
  width: 100%;
  box-sizing: border-box;
  text-align: center;
- font-size: clamp(16px, 3vw, 18px);
  box-shadow: 0 4px 8px rgba(0,0,0,0.05);
  }

  .highlight-human, .highlight-ai {
  font-weight: 600;
- padding: 8px 15px;
  border-radius: var(--border-radius-md);
- display: inline-block; /* So padding applies correctly */
- font-size: 1.1em;
- margin-bottom: 5px; /* Space for sub-text if any */
  }

  .highlight-human {
  color: var(--human-color);
  background-color: var(--human-bg);
- border: 1px solid var(--human-color);
  }

  .highlight-ai {
  color: var(--ai-color);
  background-color: var(--ai-bg);
- border: 1px solid var(--ai-color);
  }

- /* Styling for Gradio Tabs and Examples */
- .tabs > div:first-child button { /* Tab buttons */
  background-color: transparent !important;
  color: var(--text-secondary) !important;
  border: none !important;
@@ -218,23 +360,29 @@ h1 { /* Targets the main title */
  transition: color 0.3s ease, border-bottom-color 0.3s ease !important;
  }

- .tabs > div:first-child button.selected { /* Selected tab */
  color: var(--accent-color) !important;
  border-bottom-color: var(--accent-color) !important;
  font-weight: 600 !important;
  }

- .gr-examples { /* Container for examples */
  padding: 15px !important;
  border: 1px solid var(--border-color) !important;
  border-radius: var(--border-radius-md) !important;
  background-color: #fdfdfd !important;
  }
- .gr-sample-textbox { /* Example textboxes */
  border: 1px solid var(--border-color) !important;
  border-radius: var(--border-radius-md) !important;
  font-size: 14px !important;
  }

  .footer-text, #bottom_text {
  text-align: center;
@@ -242,69 +390,65 @@ h1 { /* Targets the main title */
  font-size: clamp(13px, 2vw, 14px);
  color: var(--text-secondary);
  }
- #bottom_text p { /* Ensure p tag inside Markdown inherits */
  margin: 0;
  }

- /* Responsive adjustments */
  @media (max-width: 768px) {
  body {
  padding: 10px;
- align-items: flex-start; /* Keep at top for mobile */
  }
  .gradio-container {
  padding: 20px;
- margin: 10px; /* Reduce margin on mobile */
- }
- h1 {
- font-size: 24px;
- }
- .app-description p, .features-list li {
- font-size: 14px;
- }
- #text_input_box textarea {
- font-size: 15px;
- min-height: 100px;
- }
- #result_output_box {
- font-size: 16px;
- padding: 15px;
  }
  }
  """

  iface = gr.Blocks(css=modern_css, theme=gr.themes.Base(font=[gr.themes.GoogleFont("Inter"), "sans-serif"]))

  with iface:
-     gr.Markdown(f"<h1>{title}</h1>") # Use h1 directly for title
-     gr.Markdown(description) # HTML is now within the description string

      text_input = gr.Textbox(
-         label="", # Label is visually less important now due to structure
          placeholder="Type or paste your content here...",
          elem_id="text_input_box",
-         lines=6 # Adjusted lines
      )
-     result_output = gr.HTML(elem_id="result_output_box") # Use HTML for richer output formatting

-     text_input.change(classify_text, inputs=text_input, outputs=result_output)

-     with gr.Row(): # Keep examples side-by-side if space allows, stack on mobile by default
          with gr.Column(scale=1):
              with gr.Accordion("AI Text Examples", open=False):
                  gr.Examples(
                      examples=AI_texts,
                      inputs=text_input,
-                     label="Click an example to load it",
                  )
          with gr.Column(scale=1):
              with gr.Accordion("Human Text Examples", open=False):
                  gr.Examples(
                      examples=Human_texts,
                      inputs=text_input,
-                     label="Click an example to load it",
                  )

      gr.Markdown(bottom_text, elem_id="bottom_text")

- iface.launch(share=False) # Set share=True if you need a public link
 
  import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import re
+ from tokenizers.normalizers import Replace, Regex, Sequence, Strip
+ import os # For checking model file path
+
+ # --- Model & Tokenizer Configuration ---
+ # Check if the local model file exists
+ model1_filename = "modernbert.bin"
+ if not os.path.exists(model1_filename):
+     print(f"Warning: Model file '{model1_filename}' not found. Please ensure it is in the correct directory.")
+     # You might want to handle this more gracefully, e.g., by disabling the app or using a fallback.
+     # For now, the script will likely fail at model_1.load_state_dict if the file is missing.
+
+ model1_path = model1_filename
+ model2_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed12"
+ model3_path = "https://huggingface.co/mihalykiss/modernbert_2/resolve/main/Model_groups_3class_seed22"
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ print(f"Using device: {device}")
+
+ try:
+     tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
+
+     model_1 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
+     if os.path.exists(model1_path):
+         model_1.load_state_dict(torch.load(model1_path, map_location=device))
+     else:
+         # Fallback or error if local model is not found.
+         # This part depends on how you want to handle the missing file.
+         # For this example, we'll assume it might raise an error later if not handled.
+         print(f"ERROR: Local model file '{model1_path}' not found. Model 1 cannot be loaded.")
+         # exit() # Or raise an exception
+     model_1.to(device).eval()
+
+     model_2 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
+     model_2.load_state_dict(torch.hub.load_state_dict_from_url(model2_path, map_location=device, progress=True))
+     model_2.to(device).eval()
+
+     model_3 = AutoModelForSequenceClassification.from_pretrained("answerdotai/ModernBERT-base", num_labels=41)
+     model_3.load_state_dict(torch.hub.load_state_dict_from_url(model3_path, map_location=device, progress=True))
+     model_3.to(device).eval()
+
+ except Exception as e:
+     print(f"Error during model loading: {e}")
+     print("Please ensure all model paths are correct, dependencies are installed, and you have an internet connection for remote models.")
+     # Handle the error, e.g., by exiting or displaying an error in the UI if Gradio is already running.
+     # For simplicity, we'll let it potentially crash if models can't load before Gradio starts.
+     # If Gradio is already running, you'd need a more sophisticated error display.
+     # For now, we'll just make sure the Gradio interface doesn't try to use non-existent models.
+     tokenizer = None # Prevent further errors if tokenizer failed
+     model_1, model_2, model_3 = None, None, None
+
+
+ label_mapping = {
+     0: '13B', 1: '30B', 2: '65B', 3: '7B', 4: 'GLM130B', 5: 'bloom_7b',
+     6: 'bloomz', 7: 'cohere', 8: 'davinci', 9: 'dolly', 10: 'dolly-v2-12b',
+     11: 'flan_t5_base', 12: 'flan_t5_large', 13: 'flan_t5_small',
+     14: 'flan_t5_xl', 15: 'flan_t5_xxl', 16: 'gemma-7b-it', 17: 'gemma2-9b-it',
+     18: 'gpt-3.5-turbo', 19: 'gpt-35', 20: 'gpt4', 21: 'gpt4o',
+     22: 'gpt_j', 23: 'gpt_neox', 24: 'human', 25: 'llama3-70b', 26: 'llama3-8b',
+     27: 'mixtral-8x7b', 28: 'opt_1.3b', 29: 'opt_125m', 30: 'opt_13b',
+     31: 'opt_2.7b', 32: 'opt_30b', 33: 'opt_350m', 34: 'opt_6.7b',
+     35: 'opt_iml_30b', 36: 'opt_iml_max_1.3b', 37: 't0_11b', 38: 't0_3b',
+     39: 'text-davinci-002', 40: 'text-davinci-003'
+ }

+ def clean_text(text: str) -> str:
+     text = re.sub(r'\s{2,}', ' ', text)
+     text = re.sub(r'\s+([,.;:?!])', r'\1', text)
+     return text
+
+ if tokenizer: # Only set normalizer if tokenizer loaded successfully
+     newline_to_space = Replace(Regex(r'\s*\n\s*'), " ")
+     join_hyphen_break = Replace(Regex(r'(\w+)[--]\s*\n\s*(\w+)'), r"\1\2") # Corrected hyphen regex
+     tokenizer.backend_tokenizer.normalizer = Sequence([
+         tokenizer.backend_tokenizer.normalizer, # Keep existing normalizers
+         join_hyphen_break,
+         newline_to_space,
+         Strip()
+     ])
+ # --- End Model & Tokenizer Configuration ---
+
+
+ title_md = """
+ <h1 style="text-align: center; margin-bottom: 5px;">AI Text Detector</h1>
+ <p style="text-align: center; font-size: 0.9em; color: var(--text-secondary); margin-top: 0; margin-bottom: 20px;">Developed by SzegedAI</p>
+ """

  description = """
  <div class="app-description">

  <li><span class="icon">🔍</span> <strong>Model Detection:</strong> Capable of identifying content from over 40 AI models.</li>
  <li><span class="icon">📈</span> <strong>Accuracy:</strong> Performs optimally with more extensive text inputs.</li>
  <li><span class="icon">📄</span> <strong>Read more:</strong> Our methodology is detailed in our research paper:
+ <a href="https://aclanthology.org/2025.genaidetect-1.15/" target="_blank" class="learn-more-link"><b> LINK</b></a>.
  </li>
  </ul>
  <p class="instruction-text">Paste your text into the field below to analyze its origin.</p>
  </div>
  """
+ bottom_text = "<p class='footer-text'>SzegedAI</p>" # Simplified footer, as requested

  AI_texts = [
  "Camels are remarkable desert animals known for their unique adaptations to harsh, arid environments. Native to the Middle East, North Africa, and parts of Asia, camels have been essential to human life for centuries, serving as a mode of transportation, a source of food, and even a symbol of endurance and survival. There are two primary species of camels: the dromedary camel, which has a single hump and is commonly found in the Middle East and North Africa, and the Bactrian camel, which has two humps and is native to Central Asia. Their humps store fat, not water, as commonly believed, allowing them to survive long periods without food by metabolizing the stored fat for energy. Camels are highly adapted to desert life. They can go for weeks without water, and when they do drink, they can consume up to 40 gallons in one sitting. Their thick eyelashes, sealable nostrils, and wide, padded feet protect them from sand and help them walk easily on loose desert terrain.",
 
  "To make BERT handle a variety of down-stream tasks, our input representation is able to unambiguously represent both a single sentence and a pair of sentences (e.g., h Question, Answeri) in one token sequence. Throughout this work, a “sentence” can be an arbitrary span of contiguous text, rather than an actual linguistic sentence. A “sequence” refers to the input token sequence to BERT, which may be a single sentence or two sentences packed together. We use WordPiece embeddings (Wu et al., 2016) with a 30,000 token vocabulary. The first token of every sequence is always a special classification token ([CLS]). The final hidden state corresponding to this token is used as the aggregate sequence representation for classification tasks. Sentence pairs are packed together into a single sequence."
  ]

+ def classify_text_interface(text):
+     if not all([tokenizer, model_1, model_2, model_3]):
+         return "<p style='text-align: center; color: var(--ai-color);'><strong>Error: Models not loaded. Please check the console.</strong></p>"
+
+     cleaned_text = clean_text(text)
+     if not cleaned_text.strip(): # Check cleaned_text here
+         result_message = "<p style='text-align: center; color: var(--text-secondary);'>Please enter some text to analyze.</p>"
+         return result_message
+
+     inputs = tokenizer(cleaned_text, return_tensors="pt", truncation=True, padding=True, max_length=512).to(device) # Added max_length
+
+     with torch.no_grad():
+         logits_1 = model_1(**inputs).logits
+         logits_2 = model_2(**inputs).logits
+         logits_3 = model_3(**inputs).logits
+
+     softmax_1 = torch.softmax(logits_1, dim=1)
+     softmax_2 = torch.softmax(logits_2, dim=1)
+     softmax_3 = torch.softmax(logits_3, dim=1)
+
+     averaged_probabilities = (softmax_1 + softmax_2 + softmax_3) / 3
+     probabilities = averaged_probabilities[0]
+
+     ai_probs = probabilities.clone()
+     human_label_index = -1
+     for k, v in label_mapping.items(): # Find the human label index dynamically
+         if v.lower() == 'human':
+             human_label_index = k
+             break
+
+     if human_label_index != -1:
+         ai_probs[human_label_index] = 0 # Zero out human probability for AI sum
+         human_prob_value = probabilities[human_label_index].item() * 100
+     else: # Fallback if 'human' not in label_mapping (should not happen with current map)
+         human_prob_value = 0
+         print("Warning: 'human' label not found in label_mapping.")
+
+     ai_total_prob = ai_probs.sum().item() * 100
+
+     # Recalculate human_prob based on ai_total_prob if necessary,
+     # or ensure the logic correctly identifies human vs AI majority.
+     # The original logic: human_prob = 100 - ai_total_prob might be confusing if ai_total_prob already excluded human.
+     # Let's use the direct human probability from the model.
+
+     ai_argmax_index = torch.argmax(ai_probs).item() # Argmax over non-human probabilities
+     ai_argmax_model = label_mapping.get(ai_argmax_index, "Unknown AI")
+
+     if human_prob_value > ai_total_prob : # Compare direct human probability with sum of AI probabilities
+         result_message = (
+             f"<p><strong>The text is</strong> <span class='highlight-human'><strong>{human_prob_value:.2f}%</strong> likely <b>Human written</b>.</span></p>"
+         )
+     else:
+         result_message = (
+             f"<p><strong>The text is</strong> <span class='highlight-ai'><strong>{ai_total_prob:.2f}%</strong> likely <b>AI generated</b>.</span></p>"
+             f"<p style='margin-top: 10px; font-size: 0.95em;'><strong>Most Likely AI Source:</strong> {ai_argmax_model} (with {probabilities[ai_argmax_index].item()*100:.2f}% confidence among AI models)</p>"
+         )
+     return result_message
+
  modern_css = """
  @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');

  :root {
+ --primary-bg: #F8F9FA;
+ --app-bg: #FFFFFF;
+ --text-primary: #2C3E50;
+ --text-secondary: #7F8C8D;
+ --accent-color: #1ABC9C;
+ --accent-color-darker: #16A085;
+ --border-color: #E0E0E0;
+ --input-bg: #FFFFFF;
  --input-focus-border: var(--accent-color);
  --human-color: #2ECC71; /* Green */
  --human-bg: rgba(46, 204, 113, 0.1);
  --ai-color: #E74C3C; /* Red */
  --ai-bg: rgba(231, 76, 60, 0.1);
+ --shadow-color: rgba(44, 62, 80, 0.1);
+ --container-max-width: 800px; /* Increased width */
  --border-radius-md: 8px;
  --border-radius-lg: 12px;
  }

  body {
  font-family: 'Inter', sans-serif;
+ background: linear-gradient(135deg, #f5f7fa 0%, #eef2f7 100%);
  color: var(--text-primary);
  margin: 0;
  padding: 20px;
  display: flex;
  justify-content: center;
+ align-items: flex-start;
  min-height: 100vh;
  box-sizing: border-box;
  overflow-y: auto;

  .gradio-container {
  background-color: var(--app-bg);
  border-radius: var(--border-radius-lg);
+ padding: clamp(25px, 5vw, 40px);
  box-shadow: 0 8px 25px var(--shadow-color);
  max-width: var(--container-max-width);
  width: 100%;
+ margin: 20px auto;
+ border: none;
  }

+ .form.svelte-633qhp, .block.svelte-11xb1hd, .gradio-html .block { /* More generic selector for Gradio HTML block */
  background: none !important;
  border: none !important;
  box-shadow: none !important;
+ padding: 0 !important;
  }

+ /* Title and subtitle are now handled by Markdown with inline styles, h1 here is a fallback or for other h1s */
+ h1 {
  color: var(--text-primary);
+ font-size: clamp(24px, 5vw, 30px);
  font-weight: 700;
  text-align: center;
+ margin-bottom: 20px; /* Adjusted default h1 margin */
  letter-spacing: -0.5px;
  }

  text-decoration: underline;
  }

+ #text_input_box textarea {
  background-color: var(--input-bg);
  border: 1px solid var(--border-color);
  border-radius: var(--border-radius-md);

  box-sizing: border-box;
  color: var(--text-primary);
  transition: border-color 0.3s ease, box-shadow 0.3s ease;
+ min-height: 120px;
  box-shadow: 0 2px 4px rgba(0,0,0,0.05);
  }

  #text_input_box textarea::placeholder {
+ color: #B0BEC5;
  }

  #text_input_box textarea:focus {

  outline: none;
  }

+ #result_output_box {
+ background-color: var(--input-bg); /* Ensure background for the box */
  border: 1px solid var(--border-color);
  border-radius: var(--border-radius-md);
  padding: 20px;

  width: 100%;
  box-sizing: border-box;
  text-align: center;
+ font-size: clamp(16px, 3vw, 17px); /* Slightly adjusted font size for results */
  box-shadow: 0 4px 8px rgba(0,0,0,0.05);
+ min-height: 80px; /* Give it some min height */
+ display: flex; /* For centering content if needed */
+ flex-direction: column;
+ justify-content: center;
+ }
+ #result_output_box p { /* Style paragraphs inside the result box */
+ margin-bottom: 8px; /* Space between lines in result */
+ line-height: 1.6;
+ }
+ #result_output_box p:last-child {
+ margin-bottom: 0;
  }

+
  .highlight-human, .highlight-ai {
  font-weight: 600;
+ padding: 5px 10px; /* Adjusted padding */
  border-radius: var(--border-radius-md);
+ display: inline-block;
+ font-size: 1.05em; /* Adjusted size */
  }

  .highlight-human {
  color: var(--human-color);
  background-color: var(--human-bg);
+ /* border: 1px solid var(--human-color); Removed border for cleaner look */
  }

  .highlight-ai {
  color: var(--ai-color);
  background-color: var(--ai-bg);
+ /* border: 1px solid var(--ai-color); Removed border for cleaner look */
  }

+ .tabs > div:first-child button {
  background-color: transparent !important;
  color: var(--text-secondary) !important;
  border: none !important;

  transition: color 0.3s ease, border-bottom-color 0.3s ease !important;
  }

+ .tabs > div:first-child button.selected {
  color: var(--accent-color) !important;
  border-bottom-color: var(--accent-color) !important;
  font-weight: 600 !important;
  }

+ .gr-examples {
  padding: 15px !important;
  border: 1px solid var(--border-color) !important;
  border-radius: var(--border-radius-md) !important;
  background-color: #fdfdfd !important;
+ margin-top: 10px; /* Add some space above examples */
  }
+ .gr-sample-textbox {
  border: 1px solid var(--border-color) !important;
  border-radius: var(--border-radius-md) !important;
  font-size: 14px !important;
  }
+ .gr-accordion > .label-wrap button { /* Style accordion label */
+ font-weight: 500 !important;
+ color: var(--text-primary) !important;
+ }
+

  .footer-text, #bottom_text {
  text-align: center;

  font-size: clamp(13px, 2vw, 14px);
  color: var(--text-secondary);
  }
+ #bottom_text p {
  margin: 0;
  }

  @media (max-width: 768px) {
  body {
  padding: 10px;
+ align-items: flex-start;
  }
  .gradio-container {
  padding: 20px;
+ margin: 10px;
  }
+ h1 { font-size: 22px; } /* Adjust for custom title markdown */
+ .app-description p, .features-list li { font-size: 14px; }
+ #text_input_box textarea { font-size: 15px; min-height: 100px; }
+ #result_output_box { font-size: 15px; padding: 15px; }
  }
  """

  iface = gr.Blocks(css=modern_css, theme=gr.themes.Base(font=[gr.themes.GoogleFont("Inter"), "sans-serif"]))

  with iface:
+     gr.Markdown(title_md) # Using combined Markdown for title and subtitle
+     gr.Markdown(description)

      text_input = gr.Textbox(
+         label="",
          placeholder="Type or paste your content here...",
          elem_id="text_input_box",
+         lines=7 # Adjusted lines
      )
+     result_output = gr.HTML(elem_id="result_output_box")
+
+     # Only set up the change function if models are loaded
+     if all([tokenizer, model_1, model_2, model_3]):
+         text_input.change(classify_text_interface, inputs=text_input, outputs=result_output)
+     else:
+         # Display a persistent error if models couldn't load
+         gr.HTML("<div id='result_output_box'><p style='color: var(--ai-color); text-align: center;'><strong>Application Error: Models could not be loaded. Please check the server console for details.</strong></p></div>")

+     with gr.Row():
          with gr.Column(scale=1):
              with gr.Accordion("AI Text Examples", open=False):
                  gr.Examples(
                      examples=AI_texts,
                      inputs=text_input,
+                     label="", # Label removed as accordion title is enough
                  )
          with gr.Column(scale=1):
              with gr.Accordion("Human Text Examples", open=False):
                  gr.Examples(
                      examples=Human_texts,
                      inputs=text_input,
+                     label="", # Label removed
                  )

      gr.Markdown(bottom_text, elem_id="bottom_text")

+ if __name__ == "__main__":
+     iface.launch(share=False)
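
Note (not part of the commit): the core of the new classify_text_interface is a three-model soft-voting ensemble. Each ModernBERT classifier produces a softmax distribution over 41 labels, the three distributions are averaged, and the text is reported as human-written only when the averaged probability of the 'human' label exceeds the combined probability of the 40 AI labels. The snippet below is a minimal, self-contained sketch of just that decision rule, using random dummy logits in place of real model outputs; the tensor shapes and the human label index follow app.py, but the numbers are illustrative only.

```python
import torch

# Illustrative stand-ins for the three classifiers' outputs on one text
# (batch size 1, 41 labels). In app.py these come from model_1/2/3.
logits_1 = torch.randn(1, 41)
logits_2 = torch.randn(1, 41)
logits_3 = torch.randn(1, 41)

HUMAN_INDEX = 24  # label_mapping in app.py maps index 24 to 'human'

# Soft voting: average the per-model probability distributions.
probs = (torch.softmax(logits_1, dim=1)
         + torch.softmax(logits_2, dim=1)
         + torch.softmax(logits_3, dim=1)) / 3
probs = probs[0]

human_prob = probs[HUMAN_INDEX].item() * 100

ai_probs = probs.clone()
ai_probs[HUMAN_INDEX] = 0               # exclude 'human' from the AI mass
ai_total_prob = ai_probs.sum().item() * 100
top_ai_index = torch.argmax(ai_probs).item()

if human_prob > ai_total_prob:
    print(f"{human_prob:.2f}% likely human-written")
else:
    print(f"{ai_total_prob:.2f}% likely AI-generated (top AI label index: {top_ai_index})")
```

Because the averaged distribution sums to 1, this comparison is equivalent to asking whether the ensemble assigns the 'human' label more than 50% probability.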