import re
import math
import random
from collections import defaultdict, Counter
  6. class TextSimplifier:
  7. def __init__(self):
  8. # Initialize vocabulary mappings
  9. self.complex_to_simple = self._build_vocabulary_mapping()
  10. self.internet_slang = self._build_internet_slang() # Build this first
  11. self.phrase_mappings = self._build_phrase_mappings() # Now this can use internet_slang
  12. self.greeting_patterns = self._build_greeting_patterns()
  13. self.conversational_responses = self._build_conversational_responses()
  14.  
  15. # Simple neural network weights (initialized randomly)
  16. self.weights = {}
  17. self.bias = {}
  18. self.learning_rate = 0.01
  19.  
  20. # Training data for the ML model
  21. self.training_data = self._generate_training_data()
  22.  
  23. # Initialize and train the model
  24. self._initialize_model()
  25. self._train_model()
  26.  
  27. def _build_vocabulary_mapping(self):
  28. """Build comprehensive vocabulary mapping from complex to simple words"""
  29. return {
  30. # Formal greetings
  31. 'salutations': 'hey',
  32. 'greetings': 'hello',
  33. 'good_morning': 'hey',
  34. 'good_afternoon': 'hey',
  35. 'good_evening': 'hey',
  36. 'pleased_to_meet_you': 'nice to meet you',
  37.  
  38. # Modern internet expressions
  39. 'laughing': 'lol',
  40. 'hilarious': 'lmao',
  41. 'amusing': 'funny',
  42. 'humorous': 'funny',
  43. 'comical': 'funny',
  44. 'ridiculous': 'crazy',
  45. 'insane': 'crazy',
  46. 'unbelievable': 'omg',
  47. 'shocking': 'omg',
  48. 'surprising': 'wow',
  49. 'astonishing': 'wow',
  50.  
  51. # Formal inquiries
  52. 'how_are_you_doing': 'how are you',
  53. 'how_do_you_do': 'how are you',
  54. 'how_have_you_been': 'how are you',
  55. 'what_is_your_status': 'how are you',
  56. 'how_are_things': 'how are you',
  57. 'how_is_everything': 'how are you',
  58.  
  59. # Complex words to simple + modern slang
  60. 'excellent': 'great',
  61. 'outstanding': 'awesome',
  62. 'magnificent': 'amazing',
  63. 'superb': 'great',
  64. 'tremendous': 'awesome',
  65. 'extraordinary': 'amazing',
  66. 'remarkable': 'cool',
  67. 'incredible': 'amazing',
  68. 'fantastic': 'awesome',
  69. 'wonderful': 'great',
  70. 'marvelous': 'amazing',
  71. 'spectacular': 'awesome',
  72. 'phenomenal': 'sick',
  73. 'exceptional': 'dope',
  74. 'impressive': 'sick',
  75. 'astounding': 'crazy',
  76. 'stupendous': 'insane',
  77.  
  78. # Intensifiers
  79. 'extraordinarily': 'super',
  80. 'tremendously': 'really',
  81. 'exceptionally': 'really',
  82. 'remarkably': 'really',
  83. 'incredibly': 'super',
  84. 'particularly': 'really',
  85. 'extremely': 'super',
  86. 'highly': 'really',
  87. 'very_much': 'a lot',
  88. 'greatly': 'a lot',
  89. 'significantly': 'a lot',
  90. 'considerably': 'a lot',
  91. 'immensely': 'so much',
  92. 'profoundly': 'deeply',
  93.  
  94. # Formal words to casual
  95. 'certainly': 'sure',
  96. 'absolutely': 'totally',
  97. 'definitely': 'for sure',
  98. 'indeed': 'yeah',
  99. 'affirmative': 'yes',
  100. 'negative': 'no',
  101. 'perhaps': 'maybe',
  102. 'possibly': 'maybe',
  103. 'probably': 'probs',
  104. 'obviously': 'obv',
  105. 'apparently': 'seems like',
  106. 'evidently': 'clearly',
  107.  
  108. # Professional terms
  109. 'assist': 'help',
  110. 'assistance': 'help',
  111. 'facilitate': 'help',
  112. 'accommodate': 'help',
  113. 'provide': 'give',
  114. 'obtain': 'get',
  115. 'acquire': 'get',
  116. 'purchase': 'buy',
  117. 'utilize': 'use',
  118. 'implement': 'do',
  119. 'execute': 'do',
  120. 'accomplish': 'do',
  121. 'achieve': 'do',
  122. 'endeavor': 'try',
  123. 'attempt': 'try',
  124.  
  125. # Academic/formal to internet casual
  126. 'furthermore': 'also',
  127. 'moreover': 'plus',
  128. 'additionally': 'also',
  129. 'consequently': 'so',
  130. 'therefore': 'so',
  131. 'subsequently': 'then',
  132. 'previously': 'before',
  133. 'currently': 'rn',
  134. 'presently': 'now',
  135. 'immediately': 'asap',
  136. 'instantaneously': 'instantly',
  137. 'simultaneously': 'at the same time',
  138.  
  139. # Emotions with modern expressions
  140. 'delighted': 'happy',
  141. 'thrilled': 'hyped',
  142. 'ecstatic': 'super happy',
  143. 'content': 'happy',
  144. 'satisfied': 'happy',
  145. 'disappointed': 'bummed',
  146. 'devastated': 'crushed',
  147. 'frustrated': 'annoyed',
  148. 'irritated': 'annoyed',
  149. 'concerned': 'worried',
  150. 'anxious': 'stressed',
  151. 'melancholy': 'sad',
  152. 'miserable': 'feeling bad',
  153. 'furious': 'mad',
  154. 'enraged': 'pissed',
  155. 'exhausted': 'tired af',
  156. 'fatigued': 'tired',
  157.  
  158. # Actions
  159. 'communicate': 'talk',
  160. 'converse': 'chat',
  161. 'discuss': 'talk about',
  162. 'inquire': 'ask',
  163. 'respond': 'reply',
  164. 'reply': 'answer',
  165. 'depart': 'leave',
  166. 'arrive': 'come',
  167. 'attend': 'go to',
  168. 'participate': 'join',
  169. 'procrastinate': 'put off',
  170. 'contemplate': 'think about',
  171. 'comprehend': 'understand',
  172. 'appreciate': 'get',
  173.  
  174. # Common formal phrases to casual
  175. 'thank_you_very_much': 'thanks',
  176. 'i_appreciate_it': 'thanks',
  177. 'much_obliged': 'thanks',
  178. 'you_are_welcome': 'np',
  179. 'my_pleasure': 'np',
  180. 'excuse_me': 'sorry',
  181. 'i_beg_your_pardon': 'sorry',
  182. 'i_apologize': 'my bad',
  183. 'apologies': 'sorry',
  184.  
  185. # Internet specific
  186. 'information': 'info',
  187. 'photograph': 'pic',
  188. 'photographic': 'photo',
  189. 'application': 'app',
  190. 'advertisement': 'ad',
  191. 'examination': 'exam',
  192. 'mathematics': 'math',
  193. 'laboratory': 'lab',
  194. 'dormitory': 'dorm',
  195. 'refrigerator': 'fridge',
  196. 'automobile': 'car',
  197. 'telephone': 'phone',
  198. 'television': 'tv',
  199. }
  200.  
  201. def _build_internet_slang(self):
  202. """Build modern internet slang and abbreviations"""
  203. return {
  204. # Common abbreviations
  205. 'by the way': 'btw',
  206. 'in my opinion': 'imo',
  207. 'in my humble opinion': 'imho',
  208. 'to be honest': 'tbh',
  209. 'for your information': 'fyi',
  210. 'as far as i know': 'afaik',
  211. 'oh my god': 'omg',
  212. 'oh my gosh': 'omg',
  213. 'laugh out loud': 'lol',
  214. 'rolling on floor laughing': 'rofl',
  215. 'laughing my ass off': 'lmao',
  216. 'what the fuck': 'wtf',
  217. 'what the hell': 'wth',
  218. 'i don\'t know': 'idk',
  219. 'i don\'t care': 'idc',
  220. 'nevermind': 'nvm',
  221. 'not gonna lie': 'ngl',
  222. 'shake my head': 'smh',
  223. 'in real life': 'irl',
  224. 'direct message': 'dm',
  225. 'private message': 'pm',
  226. 'be right back': 'brb',
  227. 'away from keyboard': 'afk',
  228. 'good game': 'gg',
  229. 'for the win': 'ftw',
  230. 'you only live once': 'yolo',
  231. 'fear of missing out': 'fomo',
  232. 'today i learned': 'til',
  233. 'too long didn\'t read': 'tldr',
  234. 'face to face': 'f2f',
  235. 'for what it\'s worth': 'fwiw',
  236. 'not safe for work': 'nsfw',
  237.  
  238. # Modern slang expressions
  239. 'that\'s suspicious': 'sus',
  240. 'no problem': 'no prob',
  241. 'definitely': 'def',
  242. 'probably': 'prob',
  243. 'obviously': 'obv',
  244. 'seriously': 'srsly',
  245. 'totally': 'totes',
  246. 'whatever': 'whatev',
  247. 'because': 'cuz',
  248. 'though': 'tho',
  249. 'about to': 'bout to',
  250. 'going to': 'gonna',
  251. 'want to': 'wanna',
  252. 'got to': 'gotta',
  253. 'kind of': 'kinda',
  254. 'sort of': 'sorta',
  255. 'don\'t know': 'dunno',
  256. 'let me': 'lemme',
  257. 'give me': 'gimme',
  258. 'come on': 'cmon',
  259. 'all right': 'aight',
  260.  
  261. # Gen Z expressions
  262. 'that\'s cool': 'that slaps',
  263. 'very good': 'bussin',
  264. 'looking good': 'drip',
  265. 'showing off': 'flexing',
  266. 'lying': 'cap',
  267. 'truth': 'no cap',
  268. 'basic': 'basic',
  269. 'dramatic': 'extra',
  270. 'awesome': 'fire',
  271. 'great': 'lit',
  272. 'cool': 'dope',
  273. 'amazing': 'sick',
  274. 'angry': 'salty',
  275. 'bitter': 'salty',
  276. 'throw away': 'yeet',
  277. 'excited': 'hyped',
  278. 'cringe': 'cringe',
  279. 'awkward': 'awk',
  280. 'embarrassing': 'cringe',
  281. 'understand': 'vibe with',
  282. 'relate': 'felt that',
  283. 'agree': 'facts',
  284. 'true': 'fax',
  285. 'gossip': 'tea',
  286. 'drama': 'tea',
  287. 'shocked': 'shook',
  288. 'dead': 'ded',
  289. 'crying': 'im crying',
  290. 'can\'t even': 'i cant even',
  291. }
  292.  
  293. def _build_phrase_mappings(self):
  294. """Build phrase-level transformations"""
  295. base_mappings = {
  296. # Formal questions to casual
  297. r'how are you doing today\?': 'how are you?',
  298. r'how have you been lately\?': 'how are you?',
  299. r'what is your current status\?': 'how are you?',
  300. r'how are things going\?': 'how are you?',
  301. r'how is everything with you\?': 'how are you?',
  302. r'i hope you are doing well': 'hope you\'re good',
  303. r'i trust you are well': 'hope you\'re good',
  304. r'what\'s going on\?': 'what\'s up?',
  305. r'what is happening\?': 'what\'s up?',
  306. r'what are you up to\?': 'wyd?',
  307. r'what are you doing\?': 'wyd?',
  308.  
  309. # Professional to casual
  310. r'i would like to inquire about': 'i wanna ask about',
  311. r'i am writing to inform you': 'just letting you know',
  312. r'please be advised that': 'heads up',
  313. r'i wanted to let you know': 'just fyi',
  314. r'i am pleased to inform you': 'good news',
  315. r'i regret to inform you': 'bad news',
  316. r'for your information': 'fyi',
  317. r'as soon as possible': 'asap',
  318. r'at your earliest convenience': 'when you can',
  319.  
  320. # Modern conversational
  321. r'that\'s really cool': 'that\'s dope',
  322. r'that\'s very interesting': 'that\'s sick',
  323. r'i\'m very tired': 'i\'m dead',
  324. r'i\'m exhausted': 'i\'m done',
  325. r'that\'s hilarious': 'i\'m dying',
  326. r'that\'s so funny': 'lmaooo',
  327. r'i can\'t believe': 'i can\'t even',
  328. r'oh my goodness': 'omg',
  329. r'what in the world': 'wtf',
  330. r'are you serious': 'fr?',
  331. r'for real\?': 'fr?',
  332. r'no way': 'no cap?',
  333. r'i\'m not lying': 'no cap',
  334. r'to be honest': 'tbh',
  335. r'not going to lie': 'ngl',
  336. r'in my opinion': 'imo',
  337.  
  338. # Emotional expressions
  339. r'i\'m feeling great': 'i\'m vibing',
  340. r'i\'m doing well': 'i\'m good',
  341. r'i\'m not doing well': 'not great tbh',
  342. r'i\'m very happy': 'i\'m so happy rn',
  343. r'that makes me sad': 'that\'s sad',
  344. r'i don\'t understand': 'i\'m confused',
  345. r'that\'s confusing': 'wait what',
  346. r'i agree with you': 'facts',
  347. r'you\'re right': 'ur right',
  348. r'absolutely correct': 'facts',
  349. r'i disagree': 'nah',
  350. r'that\'s incorrect': 'that\'s cap',
  351.  
  352. # Time expressions
  353. r'right now': 'rn',
  354. r'at this moment': 'rn',
  355. r'talk to you later': 'ttyl',
  356. r'see you later': 'cya',
  357. r'goodbye': 'bye',
  358. r'have a good day': 'have a good one',
  359. r'take care': 'tc',
  360.  
  361. # Reactions
  362. r'that\'s amazing': 'that\'s fire',
  363. r'that\'s terrible': 'that sucks',
  364. r'i\'m shocked': 'i\'m shook',
  365. r'unbelievable': 'can\'t even',
  366. r'that\'s disgusting': 'ew',
  367. r'that\'s weird': 'that\'s sus',
  368. r'that\'s strange': 'weird flex but ok',
  369. }
  370.  
  371. # Add internet slang phrases
  372. for formal, slang in self.internet_slang.items():
  373. pattern = r'\b' + formal.replace(' ', r'\s+') + r'\b'
  374. base_mappings[pattern] = slang
  375.  
  376. return base_mappings
  377.  
  378. def _build_greeting_patterns(self):
  379. """Build greeting pattern recognition"""
  380. return {
  381. 'formal_greetings': [
  382. r'good morning.*', r'good afternoon.*', r'good evening.*',
  383. r'greetings.*', r'salutations.*', r'hello there.*'
  384. ],
  385. 'casual_greetings': [
  386. r'hey.*', r'hi.*', r'hello.*', r'yo.*', r'sup.*',
  387. r'wassup.*', r'what\'s up.*', r'howdy.*'
  388. ],
  389. 'questions_about_wellbeing': [
  390. r'how are you.*', r'how have you been.*', r'how are things.*',
  391. r'how is everything.*', r'what.*up.*', r'how.*doing.*',
  392. r'how.*going.*', r'you good.*', r'you okay.*'
  393. ],
  394. 'thanks_expressions': [
  395. r'thank you.*', r'thanks.*', r'i appreciate.*', r'much obliged.*',
  396. r'thx.*', r'ty.*', r'tysm.*', r'thank u.*'
  397. ]
  398. }
  399.  
  400. def _build_conversational_responses(self):
  401. """Build conversational response patterns"""
  402. return {
  403. 'agreement': [
  404. 'yeah totally', 'for sure', 'facts', 'definitely', 'yup',
  405. 'absolutely', 'you got it', 'right on', '100%', 'bet'
  406. ],
  407. 'disagreement': [
  408. 'nah', 'not really', 'idk about that', 'hmm not sure',
  409. 'that\'s cap', 'no way', 'doubt it'
  410. ],
  411. 'excitement': [
  412. 'omg yes', 'that\'s sick', 'no way', 'yooo', 'let\'s gooo',
  413. 'hype', 'that\'s fire', 'amazing'
  414. ],
  415. 'confusion': [
  416. 'wait what', 'huh', 'wdym', 'i\'m lost', 'confused af',
  417. 'what now', 'come again?'
  418. ],
  419. 'acknowledgment': [
  420. 'got it', 'makes sense', 'i see', 'ah ok', 'word',
  421. 'heard', 'copy that', 'roger'
  422. ]
  423. }
  424.  
  425. def _generate_training_data(self):
  426. """Generate comprehensive training data for the ML model"""
  427. training_examples = [
  428. # Greetings
  429. ("Good morning, how are you doing today?", "hey, how are you?"),
  430. ("Greetings! I hope you are well.", "hey! hope you're good"),
  431. ("Good afternoon, how have you been?", "hey, how are you?"),
  432. ("Salutations, my friend!", "hey friend!"),
  433. ("Hello there, how are things?", "hey, what's up?"),
  434. ("What's going on with you?", "what's up?"),
  435. ("How's it going?", "how are you?"),
  436.  
  437. # Modern casual conversations
  438. ("That's absolutely hilarious!", "lmaooo"),
  439. ("I can't believe what just happened", "i can't even"),
  440. ("Are you being serious right now?", "fr?"),
  441. ("That's really suspicious behavior", "that's sus"),
  442. ("I'm extremely exhausted today", "i'm dead tired"),
  443. ("This food is exceptionally delicious", "this food is bussin"),
  444. ("Your outfit looks amazing", "your fit is fire"),
  445. ("I'm genuinely shocked by this", "i'm shook"),
  446. ("That's incredibly cool", "that's sick"),
  447. ("I completely agree with your opinion", "facts"),
  448.  
  449. # Internet expressions
  450. ("To be honest, I don't really care", "tbh idc"),
  451. ("I don't know what you mean", "idk wym"),
  452. ("Oh my god, that's incredible", "omg that's amazing"),
  453. ("Not going to lie, that's impressive", "ngl that's dope"),
  454. ("For your information, the meeting is cancelled", "fyi the meeting is cancelled"),
  455. ("By the way, did you see that?", "btw did you see that?"),
  456. ("In my opinion, that's not correct", "imo that's wrong"),
  457. ("What the hell is happening?", "wth is going on?"),
  458.  
  459. # Formal inquiries to casual
  460. ("I would like to inquire about your status", "wanna know how you are"),
  461. ("How is everything going with you?", "how's it going?"),
  462. ("I trust you are doing well", "hope you're good"),
  463. ("What is your current situation?", "what's up with you?"),
  464. ("How have you been lately?", "how you been?"),
  465.  
  466. # Professional language to casual
  467. ("I am writing to inform you about the meeting", "heads up about the meeting"),
  468. ("Please be advised that the event is cancelled", "heads up the event is cancelled"),
  469. ("I wanted to let you know about the update", "fyi there's an update"),
  470. ("Thank you for your assistance with this matter", "thanks for the help"),
  471. ("I appreciate your time and effort", "thanks"),
  472. ("At your earliest convenience", "when you can"),
  473. ("As soon as possible", "asap"),
  474.  
  475. # Emotional expressions
  476. ("I am absolutely delighted to hear that", "i'm so happy to hear that"),
  477. ("That is truly magnificent news", "that's amazing news"),
  478. ("I am quite disappointed about this", "i'm bummed about this"),
  479. ("This is extraordinarily wonderful", "this is awesome"),
  480. ("I am tremendously excited", "i'm hyped"),
  481. ("I'm feeling melancholy today", "feeling sad today"),
  482. ("That's making me furious", "that's making me mad"),
  483. ("I'm concerned about the situation", "i'm worried about this"),
  484.  
  485. # Modern reactions
  486. ("That's the best thing ever!", "that's fire!"),
  487. ("This situation is terrible", "this sucks"),
  488. ("I can't handle this anymore", "i'm done"),
  489. ("That's extremely weird", "that's sus af"),
  490. ("You're absolutely right about that", "you're right, facts"),
  491. ("I strongly disagree with that statement", "nah that's cap"),
  492. ("This is making me laugh so hard", "i'm dying lol"),
  493. ("That's genuinely surprising", "wait what"),
  494.  
  495. # Conversational flow
  496. ("What are your thoughts on this matter?", "what do you think?"),
  497. ("Could you elaborate on that point?", "wdym?"),
  498. ("I need to depart now", "gotta go"),
  499. ("It was pleasant talking with you", "nice talking to you"),
  500. ("Until we meet again", "see ya"),
  501. ("Have a wonderful day", "have a good one"),
  502. ("Take care of yourself", "take care"),
  503.  
  504. # Complex to simple with slang
  505. ("The weather is absolutely magnificent today", "the weather is amazing today"),
  506. ("This is an extraordinarily difficult problem", "this is a really hard problem"),
  507. ("I am tremendously grateful for your help", "thanks so much for your help"),
  508. ("That was an outstanding performance", "that was sick"),
  509. ("Your presentation was phenomenal", "your presentation was fire"),
  510. ("The party was incredibly enjoyable", "the party was lit"),
  511.  
  512. # Internet culture
  513. ("That's creating a lot of drama", "that's creating tea"),
  514. ("Stop showing off so much", "stop flexing"),
  515. ("You're being overly dramatic", "you're being extra"),
  516. ("I understand what you mean", "i vibe with that"),
  517. ("That's completely true", "that's fax"),
  518. ("I'm leaving now", "i'm out"),
  519. ("Let's do this", "let's gooo"),
  520. ("That's embarrassing", "that's cringe"),
  521.  
  522. # Modern youth expressions
  523. ("That music is really good", "that music slaps"),
  524. ("You look very stylish", "you got drip"),
  525. ("I'm throwing this away", "gonna yeet this"),
  526. ("Don't lie to me", "don't cap"),
  527. ("I'm telling the truth", "no cap"),
  528. ("That made me very angry", "that made me salty"),
  529. ("The food tastes amazing", "the food is bussin"),
  530. ("That's basic and unoriginal", "that's basic"),
  531. ]
  532.  
  533. return training_examples
  534.  
  535. def _initialize_model(self):
  536. """Initialize simple neural network model"""
  537. # Feature dimensions
  538. self.input_size = 100 # Word embedding size
  539. self.hidden_size = 50
  540. self.output_size = 100
  541.  
  542. # Initialize weights randomly
  543. self.weights['W1'] = [[random.uniform(-1, 1) for _ in range(self.hidden_size)]
  544. for _ in range(self.input_size)]
  545. self.weights['W2'] = [[random.uniform(-1, 1) for _ in range(self.output_size)]
  546. for _ in range(self.hidden_size)]
  547.  
  548. # Initialize biases
  549. self.bias['b1'] = [random.uniform(-1, 1) for _ in range(self.hidden_size)]
  550. self.bias['b2'] = [random.uniform(-1, 1) for _ in range(self.output_size)]
  551.  
  552. # Word to index mapping
  553. self.word_to_index = {}
  554. self.index_to_word = {}
  555. self._build_vocabulary()
  556.  
  557. def _build_vocabulary(self):
  558. """Build vocabulary from training data"""
  559. all_words = set()
  560. for complex_text, simple_text in self.training_data:
  561. words = self._tokenize(complex_text.lower()) + self._tokenize(simple_text.lower())
  562. all_words.update(words)
  563.  
  564. # Add vocabulary mapping words
  565. for word in self.complex_to_simple.keys():
  566. all_words.add(word)
  567. for word in self.complex_to_simple.values():
  568. all_words.add(word)
  569.  
  570. # Add internet slang
  571. for phrase in self.internet_slang.keys():
  572. all_words.update(phrase.split())
  573. for slang in self.internet_slang.values():
  574. all_words.update(slang.split())
  575.  
  576. # Create word-to-index mapping
  577. for i, word in enumerate(sorted(all_words)):
  578. self.word_to_index[word] = i
  579. self.index_to_word[i] = word
  580.  
  581. def _tokenize(self, text):
  582. """Simple tokenization"""
  583. # Remove punctuation and split
  584. text = re.sub(r'[^\w\s\']', '', text.lower())
  585. return text.split()
  586.  
  587. def _text_to_vector(self, text):
  588. """Convert text to feature vector"""
  589. words = self._tokenize(text)
  590. vector = [0.0] * self.input_size
  591.  
  592. for word in words:
  593. if word in self.word_to_index:
  594. idx = self.word_to_index[word] % self.input_size
  595. vector[idx] += 1.0
  596.  
  597. # Normalize
  598. total = sum(vector)
  599. if total > 0:
  600. vector = [v / total for v in vector]
  601.  
  602. return vector
  603.  
  604. def _sigmoid(self, x):
  605. """Sigmoid activation function"""
  606. return 1 / (1 + math.exp(-max(-500, min(500, x))))
  607.  
  608. def _forward_pass(self, input_vector):
  609. """Forward pass through the network"""
  610. # Hidden layer
  611. hidden = []
  612. for i in range(self.hidden_size):
  613. weighted_sum = sum(input_vector[j] * self.weights['W1'][j][i]
  614. for j in range(self.input_size)) + self.bias['b1'][i]
  615. hidden.append(self._sigmoid(weighted_sum))
  616.  
  617. # Output layer
  618. output = []
  619. for i in range(self.output_size):
  620. weighted_sum = sum(hidden[j] * self.weights['W2'][j][i]
  621. for j in range(self.hidden_size)) + self.bias['b2'][i]
  622. output.append(self._sigmoid(weighted_sum))
  623.  
  624. return hidden, output
  625.  
  626. def _train_model(self):
  627. """Train the neural network model silently"""
  628. epochs = 30 # Balanced for performance
  629.  
  630. for epoch in range(epochs):
  631. total_error = 0
  632.  
  633. for complex_text, simple_text in self.training_data:
  634. try:
  635. # Convert to vectors
  636. input_vec = self._text_to_vector(complex_text)
  637. target_vec = self._text_to_vector(simple_text)
  638.  
  639. # Forward pass
  640. hidden, output = self._forward_pass(input_vec)
  641.  
  642. # Calculate error (simplified)
  643. if len(output) == len(target_vec):
  644. error = sum((target_vec[i] - output[i]) ** 2 for i in range(len(output)))
  645. total_error += error
  646.  
  647. # Simple weight update (simplified backpropagation)
  648. for i in range(min(self.output_size, len(output))):
  649. for j in range(self.hidden_size):
  650. if i < len(target_vec):
  651. gradient = 2 * (output[i] - target_vec[i]) * output[i] * (1 - output[i])
  652. self.weights['W2'][j][i] -= self.learning_rate * gradient * hidden[j]
  653.  
  654. except Exception:
  655. # Skip problematic training examples
  656. continue
  657.  
  658. def _apply_vocabulary_mapping(self, text):
  659. """Apply direct vocabulary mappings"""
  660. words = self._tokenize(text)
  661. result = []
  662.  
  663. for word in words:
  664. if word in self.complex_to_simple:
  665. result.append(self.complex_to_simple[word])
  666. else:
  667. result.append(word)
  668.  
  669. return ' '.join(result)
  670.  
  671. def _apply_phrase_mappings(self, text):
  672. """Apply phrase-level transformations"""
  673. result = text.lower()
  674.  
  675. for pattern, replacement in self.phrase_mappings.items():
  676. result = re.sub(pattern, replacement, result, flags=re.IGNORECASE)
  677.  
  678. return result
  679.  
  680. def _classify_intent(self, text):
  681. """Classify the intent of the input text"""
  682. text_lower = text.lower()
  683.  
  684. # Check for greetings
  685. for pattern in self.greeting_patterns['formal_greetings']:
  686. if re.search(pattern, text_lower):
  687. return 'greeting'
  688.  
  689. for pattern in self.greeting_patterns['casual_greetings']:
  690. if re.search(pattern, text_lower):
  691. return 'casual_greeting'
  692.  
  693. # Check for wellbeing questions
  694. for pattern in self.greeting_patterns['questions_about_wellbeing']:
  695. if re.search(pattern, text_lower):
  696. return 'wellbeing_question'
  697.  
  698. # Check for thanks
  699. for pattern in self.greeting_patterns['thanks_expressions']:
  700. if re.search(pattern, text_lower):
  701. return 'thanks'
  702.  
  703. # Check for questions
  704. if '?' in text or any(q in text_lower for q in ['what', 'who', 'where', 'when', 'why', 'how']):
  705. return 'question'
  706.  
  707. # Check for excitement
  708. if any(word in text_lower for word in ['amazing', 'awesome', 'incredible', 'fantastic', '!']):
  709. return 'excitement'
  710.  
  711. # Check for agreement/disagreement
  712. if any(word in text_lower for word in ['agree', 'right', 'correct', 'true']):
  713. return 'agreement'
  714. if any(word in text_lower for word in ['disagree', 'wrong', 'incorrect', 'false']):
  715. return 'disagreement'
  716.  
  717. return 'general'
  718.  
  719. def _apply_intent_based_simplification(self, text, intent):
  720. """Apply simplification based on classified intent"""
  721. if intent == 'greeting':
  722. return random.choice(['hey', 'hi', 'hello', 'yo'])
  723. elif intent == 'casual_greeting':
  724. return text.lower()
  725. elif intent == 'wellbeing_question':
  726. return random.choice(['how are you?', 'what\'s up?', 'how\'s it going?', 'you good?'])
  727. elif intent == 'thanks':
  728. return random.choice(['thanks', 'thx', 'ty', 'thanks!'])
  729. elif intent == 'excitement':
  730. if 'amazing' in text.lower() or 'incredible' in text.lower():
  731. return random.choice(['that\'s sick!', 'that\'s fire!', 'omg yes!', 'let\'s gooo!'])
  732. elif intent == 'agreement':
  733. return random.choice(self.conversational_responses['agreement'])
  734. elif intent == 'disagreement':
  735. return random.choice(self.conversational_responses['disagreement'])
  736. elif intent == 'question' and 'what' in text.lower() and 'mean' in text.lower():
  737. return 'wdym?'
  738.  
  739. return text
  740.  
  741. def _ml_simplify(self, text):
  742. """Use ML model to simplify text"""
  743. input_vec = self._text_to_vector(text)
  744. hidden, output = self._forward_pass(input_vec)
  745.  
  746. # Find most activated words in output
  747. top_indices = sorted(range(len(output)), key=lambda i: output[i], reverse=True)[:5]
  748.  
  749. result_words = []
  750. for idx in top_indices:
  751. if idx < len(self.index_to_word) and output[idx] > 0.5:
  752. result_words.append(self.index_to_word[idx])
  753.  
  754. return ' '.join(result_words) if result_words else text
  755.  
  756. def simplify(self, text):
  757. """Main simplification function combining all techniques"""
  758. if not text or not text.strip():
  759. return text
  760.  
  761. # Step 1: Classify intent
  762. intent = self._classify_intent(text)
  763.  
  764. # Step 2: Apply intent-based simplification for common patterns
  765. intent_result = self._apply_intent_based_simplification(text, intent)
  766. if intent_result != text:
  767. return intent_result
  768.  
  769. # Step 3: Apply phrase mappings
  770. phrase_result = self._apply_phrase_mappings(text)
  771.  
  772. # Step 4: Apply vocabulary mappings
  773. vocab_result = self._apply_vocabulary_mapping(phrase_result)
  774.  
  775. # Step 5: Apply ML model for additional simplification
  776. ml_result = self._ml_simplify(vocab_result)
  777.  
  778. # Step 6: Post-processing cleanup
  779. final_result = self._post_process(ml_result if ml_result.strip() else vocab_result)
  780.  
  781. return final_result
  782.  
  783. def _post_process(self, text):
  784. """Post-process the simplified text"""
  785. # Remove extra spaces
  786. text = re.sub(r'\s+', ' ', text.strip())
  787.  
  788. # Handle question marks
  789. question_words = ['how', 'what', 'why', 'when', 'where', 'who', 'wyd', 'wdym', 'fr']
  790. if any(word in text.lower() for word in question_words):
  791. if not text.endswith('?'):
  792. text += '?'
  793.  
  794. # Don't capitalize certain internet slang
  795. no_caps = ['lol', 'lmao', 'omg', 'wtf', 'wth', 'idk', 'idc', 'ngl', 'tbh',
  796. 'imo', 'imho', 'fyi', 'asap', 'rn', 'af', 'btw', 'smh', 'rofl']
  797.  
  798. if text and not any(text.lower().startswith(word) for word in no_caps):
  799. text = text[0].upper() + text[1:]
  800.  
  801. return text
  802.  
  803. def get_conversational_response(self, text):
  804. """Generate a conversational response based on the input"""
  805. simplified = self.simplify(text)
  806. intent = self._classify_intent(text)
  807.  
  808. # Add some conversational flair based on intent
  809. if intent == 'wellbeing_question':
  810. responses = [
  811. "i'm good! " + simplified,
  812. "doing great! " + simplified,
  813. "all good here! " + simplified,
  814. "vibing! " + simplified
  815. ]
  816. return random.choice(responses)
  817.  
  818. return simplified
  819.  
  820. def batch_simplify(self, texts):
  821. """Simplify multiple texts at once"""
  822. return [self.simplify(text) for text in texts]
  823.  
  824. def add_custom_mapping(self, complex_word, simple_word):
  825. """Add custom vocabulary mapping"""
  826. self.complex_to_simple[complex_word.lower()] = simple_word.lower()
  827.  
  828. def get_simplification_confidence(self, original, simplified):
  829. """Calculate confidence score for simplification"""
  830. original_words = set(self._tokenize(original.lower()))
  831. simplified_words = set(self._tokenize(simplified.lower()))
  832.  
  833. # Calculate word reduction ratio
  834. reduction_ratio = 1 - (len(simplified_words) / max(len(original_words), 1))
  835.  
  836. # Calculate known word mapping ratio
  837. mapped_words = sum(1 for word in original_words if word in self.complex_to_simple)
  838. mapping_ratio = mapped_words / max(len(original_words), 1)
  839.  
  840. # Check for slang usage
  841. slang_words = sum(1 for word in simplified_words
  842. if word in self.internet_slang.values())
  843. slang_ratio = slang_words / max(len(simplified_words), 1)
  844.  
  845. # Combined confidence score
  846. confidence = (reduction_ratio * 0.3) + (mapping_ratio * 0.4) + (slang_ratio * 0.3)
  847. return min(1.0, max(0.0, confidence))
  848.  
# Example usage and testing
  850. def test_text_simplifier():
  851. """Test the text simplifier with various examples"""
  852. simplifier = TextSimplifier()
  853.  
  854. test_cases = [
  855. # Original test cases
  856. "Good morning, how are you doing today?",
  857. "I would like to inquire about your current status",
  858. "Thank you very much for your assistance",
  859.  
  860. # Modern slang tests
  861. "That's absolutely hilarious!",
  862. "I can't believe what just happened",
  863. "To be honest, I don't really care about that",
  864. "Oh my god, that's incredible!",
  865. "Not going to lie, that was impressive",
  866.  
  867. # Internet culture
  868. "That outfit looks amazing on you",
  869. "This food is exceptionally delicious",
  870. "Are you being serious right now?",
  871. "That's really suspicious behavior",
  872. "I'm extremely exhausted today",
  873.  
  874. # Conversational
  875. "What are your thoughts on this matter?",
  876. "I strongly disagree with that statement",
  877. "That's the best thing I've ever seen!",
  878. "I'm feeling melancholy today",
  879. "Your presentation was phenomenal",
  880.  
  881. # Mixed formal and casual
  882. "By the way, the meeting is at 3pm",
  883. "For your information, the project is complete",
  884. "I appreciate your prompt response",
  885. "The weather is absolutely magnificent today",
  886. "That party was incredibly enjoyable"
  887. ]
  888.  
  889. print("šŸ”„ === Enhanced Text Simplification Results === šŸ”„\n")
  890.  
  891. for i, text in enumerate(test_cases, 1):
  892. simplified = simplifier.simplify(text)
  893. confidence = simplifier.get_simplification_confidence(text, simplified)
  894.  
  895. # Emoji based on confidence
  896. if confidence >= 0.5:
  897. emoji = "šŸ’Æ"
  898. elif confidence >= 0.3:
  899. emoji = "✨"
  900. else:
  901. emoji = "šŸ‘"
  902.  
  903. print(f"{i:2d}. Original: {text}")
  904. print(f" Simplified: {simplified} {emoji}")
  905. print(f" Confidence: {confidence:.2f}")
  906. print()
  907.  
  908. def demo_modern_conversations():
  909. """Demo modern conversational examples"""
  910. simplifier = TextSimplifier()
  911.  
  912. conversations = [
  913. ("User", "Hey there! How's everything going with you today?"),
  914. ("User", "I just saw the most incredible thing ever!"),
  915. ("User", "To be honest, I'm not feeling great about this situation"),
  916. ("User", "Oh my goodness, did you see what happened?"),
  917. ("User", "That's extremely suspicious if you ask me"),
  918. ("User", "I'm absolutely exhausted from all this work"),
  919. ("User", "Your new hairstyle looks phenomenal!"),
  920. ("User", "Not going to lie, that was pretty impressive"),
  921. ("User", "What do you think about the new update?"),
  922. ("User", "I completely disagree with that opinion"),
  923. ]
  924.  
  925. print("\nšŸ’¬ === Modern Conversation Demo === šŸ’¬\n")
  926.  
  927. for speaker, text in conversations:
  928. response = simplifier.get_conversational_response(text)
  929. print(f"{speaker}: {text}")
  930. print(f"Bot: {response}")
  931. print()
  932.  
  933. def interactive_simplifier():
  934. """Interactive text simplification with modern slang"""
  935. simplifier = TextSimplifier()
  936.  
  937. print("šŸš€ === Interactive Modern Text Simplifier === šŸš€")
  938. print("šŸ’¬ Type any text and watch it get simplified with modern slang!")
  939. print("šŸ”„ Try formal language, emotions, questions, reactions...")
  940. print("āŒ Type 'quit', 'exit', or 'q' to stop")
  941. print("=" * 60)
  942.  
  943. example_suggestions = [
  944. "That's absolutely incredible!",
  945. "I'm extremely tired from work today",
  946. "Oh my god, did you see that?",
  947. "To be honest, I don't understand",
  948. "Your outfit looks amazing!",
  949. "Not going to lie, that was awesome",
  950. "I completely agree with your opinion",
  951. "Are you being serious right now?"
  952. ]
  953.  
  954. print("\nšŸ”„ Try these examples:")
  955. for i, example in enumerate(example_suggestions[:4], 1):
  956. print(f"{i}. {example}")
  957. print()
  958.  
  959. conversation_history = []
  960.  
  961. while True:
  962. try:
  963. user_input = input("šŸ‘¤ You: ").strip()
  964.  
  965. if user_input.lower() in ['quit', 'exit', 'q']:
  966. print("āœŒļø Later! Thanks for chatting!")
  967. break
  968.  
  969. if not user_input:
  970. print("šŸ˜… Say something...")
  971. continue
  972.  
  973. # Process and respond
  974. response = simplifier.get_conversational_response(user_input)
  975. confidence = simplifier.get_simplification_confidence(user_input, response)
  976.  
  977. # Show the bot response
  978. print(f"šŸ¤– Bot: {response}")
  979.  
  980. # Add to conversation history
  981. conversation_history.append((user_input, response))
  982.  
  983. # Show stats occasionally
  984. if len(conversation_history) % 5 == 0:
  985. print(f"\nšŸ“Š Conversation streak: {len(conversation_history)} exchanges!")
  986.  
  987. print()
  988.  
  989. except KeyboardInterrupt:
  990. print("\n\nāœŒļø Peace out! Thanks for the chat!")
  991. break
  992. except Exception as e:
  993. print(f"😬 Oops: {e}")
  994. print("Try again...")
  995. continue
  996.  
  997. if __name__ == "__main__":
  998. print("šŸš€ Modern Text Simplification System Ready!")
  999. print("=" * 50)
  1000.  
  1001. # Run tests first to show capabilities
  1002. test_text_simplifier()
  1003.  
  1004. # Then show conversation demo
  1005. demo_modern_conversations()
  1006.  
  1007. # Start interactive mode
  1008. print("\n" + "=" * 60)
  1009. interactive_simplifier()
Success #stdin #stdout 0.04s 25840KB
stdin
Standard input is empty
stdout
import re
import math
import random
from collections import defaultdict, Counter

class TextSimplifier:
    def __init__(self):
        # Initialize vocabulary mappings
        self.complex_to_simple = self._build_vocabulary_mapping()
        self.internet_slang = self._build_internet_slang()  # Build this first
        self.phrase_mappings = self._build_phrase_mappings()  # Now this can use internet_slang
        self.greeting_patterns = self._build_greeting_patterns()
        self.conversational_responses = self._build_conversational_responses()
        
        # Simple neural network weights (initialized randomly)
        self.weights = {}
        self.bias = {}
        self.learning_rate = 0.01
        
        # Training data for the ML model
        self.training_data = self._generate_training_data()
        
        # Initialize and train the model
        self._initialize_model()
        self._train_model()
    
    def _build_vocabulary_mapping(self):
        """Build comprehensive vocabulary mapping from complex to simple words"""
        return {
            # Formal greetings
            'salutations': 'hey',
            'greetings': 'hello',
            'good_morning': 'hey',
            'good_afternoon': 'hey',
            'good_evening': 'hey',
            'pleased_to_meet_you': 'nice to meet you',
            
            # Modern internet expressions
            'laughing': 'lol',
            'hilarious': 'lmao',
            'amusing': 'funny',
            'humorous': 'funny',
            'comical': 'funny',
            'ridiculous': 'crazy',
            'insane': 'crazy',
            'unbelievable': 'omg',
            'shocking': 'omg',
            'surprising': 'wow',
            'astonishing': 'wow',
            
            # Formal inquiries
            'how_are_you_doing': 'how are you',
            'how_do_you_do': 'how are you',
            'how_have_you_been': 'how are you',
            'what_is_your_status': 'how are you',
            'how_are_things': 'how are you',
            'how_is_everything': 'how are you',
            
            # Complex words to simple + modern slang
            'excellent': 'great',
            'outstanding': 'awesome',
            'magnificent': 'amazing',
            'superb': 'great',
            'tremendous': 'awesome',
            'extraordinary': 'amazing',
            'remarkable': 'cool',
            'incredible': 'amazing',
            'fantastic': 'awesome',
            'wonderful': 'great',
            'marvelous': 'amazing',
            'spectacular': 'awesome',
            'phenomenal': 'sick',
            'exceptional': 'dope',
            'impressive': 'sick',
            'astounding': 'crazy',
            'stupendous': 'insane',
            
            # Intensifiers
            'extraordinarily': 'super',
            'tremendously': 'really',
            'exceptionally': 'really',
            'remarkably': 'really',
            'incredibly': 'super',
            'particularly': 'really',
            'extremely': 'super',
            'highly': 'really',
            'very_much': 'a lot',
            'greatly': 'a lot',
            'significantly': 'a lot',
            'considerably': 'a lot',
            'immensely': 'so much',
            'profoundly': 'deeply',
            
            # Formal words to casual
            'certainly': 'sure',
            'absolutely': 'totally',
            'definitely': 'for sure',
            'indeed': 'yeah',
            'affirmative': 'yes',
            'negative': 'no',
            'perhaps': 'maybe',
            'possibly': 'maybe',
            'probably': 'probs',
            'obviously': 'obv',
            'apparently': 'seems like',
            'evidently': 'clearly',
            
            # Professional terms
            'assist': 'help',
            'assistance': 'help',
            'facilitate': 'help',
            'accommodate': 'help',
            'provide': 'give',
            'obtain': 'get',
            'acquire': 'get',
            'purchase': 'buy',
            'utilize': 'use',
            'implement': 'do',
            'execute': 'do',
            'accomplish': 'do',
            'achieve': 'do',
            'endeavor': 'try',
            'attempt': 'try',
            
            # Academic/formal to internet casual
            'furthermore': 'also',
            'moreover': 'plus',
            'additionally': 'also',
            'consequently': 'so',
            'therefore': 'so',
            'subsequently': 'then',
            'previously': 'before',
            'currently': 'rn',
            'presently': 'now',
            'immediately': 'asap',
            'instantaneously': 'instantly',
            'simultaneously': 'at the same time',
            
            # Emotions with modern expressions
            'delighted': 'happy',
            'thrilled': 'hyped',
            'ecstatic': 'super happy',
            'content': 'happy',
            'satisfied': 'happy',
            'disappointed': 'bummed',
            'devastated': 'crushed',
            'frustrated': 'annoyed',
            'irritated': 'annoyed',
            'concerned': 'worried',
            'anxious': 'stressed',
            'melancholy': 'sad',
            'miserable': 'feeling bad',
            'furious': 'mad',
            'enraged': 'pissed',
            'exhausted': 'tired af',
            'fatigued': 'tired',
            
            # Actions
            'communicate': 'talk',
            'converse': 'chat',
            'discuss': 'talk about',
            'inquire': 'ask',
            'respond': 'reply',
            'reply': 'answer',
            'depart': 'leave',
            'arrive': 'come',
            'attend': 'go to',
            'participate': 'join',
            'procrastinate': 'put off',
            'contemplate': 'think about',
            'comprehend': 'understand',
            'appreciate': 'get',
            
            # Common formal phrases to casual
            'thank_you_very_much': 'thanks',
            'i_appreciate_it': 'thanks',
            'much_obliged': 'thanks',
            'you_are_welcome': 'np',
            'my_pleasure': 'np',
            'excuse_me': 'sorry',
            'i_beg_your_pardon': 'sorry',
            'i_apologize': 'my bad',
            'apologies': 'sorry',
            
            # Internet specific
            'information': 'info',
            'photograph': 'pic',
            'photographic': 'photo',
            'application': 'app',
            'advertisement': 'ad',
            'examination': 'exam',
            'mathematics': 'math',
            'laboratory': 'lab',
            'dormitory': 'dorm',
            'refrigerator': 'fridge',
            'automobile': 'car',
            'telephone': 'phone',
            'television': 'tv',
        }
    
    def _build_internet_slang(self):
        """Build modern internet slang and abbreviations"""
        return {
            # Common abbreviations
            'by the way': 'btw',
            'in my opinion': 'imo',
            'in my humble opinion': 'imho',
            'to be honest': 'tbh',
            'for your information': 'fyi',
            'as far as i know': 'afaik',
            'oh my god': 'omg',
            'oh my gosh': 'omg',
            'laugh out loud': 'lol',
            'rolling on floor laughing': 'rofl',
            'laughing my ass off': 'lmao',
            'what the fuck': 'wtf',
            'what the hell': 'wth',
            'i don\'t know': 'idk',
            'i don\'t care': 'idc',
            'nevermind': 'nvm',
            'not gonna lie': 'ngl',
            'shake my head': 'smh',
            'in real life': 'irl',
            'direct message': 'dm',
            'private message': 'pm',
            'be right back': 'brb',
            'away from keyboard': 'afk',
            'good game': 'gg',
            'for the win': 'ftw',
            'you only live once': 'yolo',
            'fear of missing out': 'fomo',
            'today i learned': 'til',
            'too long didn\'t read': 'tldr',
            'face to face': 'f2f',
            'for what it\'s worth': 'fwiw',
            'not safe for work': 'nsfw',
            
            # Modern slang expressions
            'that\'s suspicious': 'sus',
            'no problem': 'no prob',
            'definitely': 'def',
            'probably': 'prob',
            'obviously': 'obv',
            'seriously': 'srsly',
            'totally': 'totes',
            'whatever': 'whatev',
            'because': 'cuz',
            'though': 'tho',
            'about to': 'bout to',
            'going to': 'gonna',
            'want to': 'wanna',
            'got to': 'gotta',
            'kind of': 'kinda',
            'sort of': 'sorta',
            'don\'t know': 'dunno',
            'let me': 'lemme',
            'give me': 'gimme',
            'come on': 'cmon',
            'all right': 'aight',
            
            # Gen Z expressions
            'that\'s cool': 'that slaps',
            'very good': 'bussin',
            'looking good': 'drip',
            'showing off': 'flexing',
            'lying': 'cap',
            'truth': 'no cap',
            'basic': 'basic',
            'dramatic': 'extra',
            'awesome': 'fire',
            'great': 'lit',
            'cool': 'dope',
            'amazing': 'sick',
            'angry': 'salty',
            'bitter': 'salty',
            'throw away': 'yeet',
            'excited': 'hyped',
            'cringe': 'cringe',
            'awkward': 'awk',
            'embarrassing': 'cringe',
            'understand': 'vibe with',
            'relate': 'felt that',
            'agree': 'facts',
            'true': 'fax',
            'gossip': 'tea',
            'drama': 'tea',
            'shocked': 'shook',
            'dead': 'ded',
            'crying': 'im crying',
            'can\'t even': 'i cant even',
        }
    
    def _build_phrase_mappings(self):
        """Build phrase-level transformations"""
        base_mappings = {
            # Formal questions to casual
            r'how are you doing today\?': 'how are you?',
            r'how have you been lately\?': 'how are you?',
            r'what is your current status\?': 'how are you?',
            r'how are things going\?': 'how are you?',
            r'how is everything with you\?': 'how are you?',
            r'i hope you are doing well': 'hope you\'re good',
            r'i trust you are well': 'hope you\'re good',
            r'what\'s going on\?': 'what\'s up?',
            r'what is happening\?': 'what\'s up?',
            r'what are you up to\?': 'wyd?',
            r'what are you doing\?': 'wyd?',
            
            # Professional to casual
            r'i would like to inquire about': 'i wanna ask about',
            r'i am writing to inform you': 'just letting you know',
            r'please be advised that': 'heads up',
            r'i wanted to let you know': 'just fyi',
            r'i am pleased to inform you': 'good news',
            r'i regret to inform you': 'bad news',
            r'for your information': 'fyi',
            r'as soon as possible': 'asap',
            r'at your earliest convenience': 'when you can',
            
            # Modern conversational
            r'that\'s really cool': 'that\'s dope',
            r'that\'s very interesting': 'that\'s sick',
            r'i\'m very tired': 'i\'m dead',
            r'i\'m exhausted': 'i\'m done',
            r'that\'s hilarious': 'i\'m dying',
            r'that\'s so funny': 'lmaooo',
            r'i can\'t believe': 'i can\'t even',
            r'oh my goodness': 'omg',
            r'what in the world': 'wtf',
            r'are you serious': 'fr?',
            r'for real\?': 'fr?',
            r'no way': 'no cap?',
            r'i\'m not lying': 'no cap',
            r'to be honest': 'tbh',
            r'not going to lie': 'ngl',
            r'in my opinion': 'imo',
            
            # Emotional expressions
            r'i\'m feeling great': 'i\'m vibing',
            r'i\'m doing well': 'i\'m good',
            r'i\'m not doing well': 'not great tbh',
            r'i\'m very happy': 'i\'m so happy rn',
            r'that makes me sad': 'that\'s sad',
            r'i don\'t understand': 'i\'m confused',
            r'that\'s confusing': 'wait what',
            r'i agree with you': 'facts',
            r'you\'re right': 'ur right',
            r'absolutely correct': 'facts',
            r'i disagree': 'nah',
            r'that\'s incorrect': 'that\'s cap',
            
            # Time expressions
            r'right now': 'rn',
            r'at this moment': 'rn',
            r'talk to you later': 'ttyl',
            r'see you later': 'cya',
            r'goodbye': 'bye',
            r'have a good day': 'have a good one',
            r'take care': 'tc',
            
            # Reactions
            r'that\'s amazing': 'that\'s fire',
            r'that\'s terrible': 'that sucks',
            r'i\'m shocked': 'i\'m shook',
            r'unbelievable': 'can\'t even',
            r'that\'s disgusting': 'ew',
            r'that\'s weird': 'that\'s sus',
            r'that\'s strange': 'weird flex but ok',
        }
        
        # Add internet slang phrases
        for formal, slang in self.internet_slang.items():
            pattern = r'\b' + formal.replace(' ', r'\s+') + r'\b'
            base_mappings[pattern] = slang
        
        return base_mappings
    
    def _build_greeting_patterns(self):
        """Build greeting pattern recognition"""
        return {
            'formal_greetings': [
                r'good morning.*', r'good afternoon.*', r'good evening.*',
                r'greetings.*', r'salutations.*', r'hello there.*'
            ],
            'casual_greetings': [
                r'hey.*', r'hi.*', r'hello.*', r'yo.*', r'sup.*', 
                r'wassup.*', r'what\'s up.*', r'howdy.*'
            ],
            'questions_about_wellbeing': [
                r'how are you.*', r'how have you been.*', r'how are things.*',
                r'how is everything.*', r'what.*up.*', r'how.*doing.*',
                r'how.*going.*', r'you good.*', r'you okay.*'
            ],
            'thanks_expressions': [
                r'thank you.*', r'thanks.*', r'i appreciate.*', r'much obliged.*',
                r'thx.*', r'ty.*', r'tysm.*', r'thank u.*'
            ]
        }
    
    def _build_conversational_responses(self):
        """Build conversational response patterns"""
        return {
            'agreement': [
                'yeah totally', 'for sure', 'facts', 'definitely', 'yup',
                'absolutely', 'you got it', 'right on', '100%', 'bet'
            ],
            'disagreement': [
                'nah', 'not really', 'idk about that', 'hmm not sure',
                'that\'s cap', 'no way', 'doubt it'
            ],
            'excitement': [
                'omg yes', 'that\'s sick', 'no way', 'yooo', 'let\'s gooo',
                'hype', 'that\'s fire', 'amazing'
            ],
            'confusion': [
                'wait what', 'huh', 'wdym', 'i\'m lost', 'confused af',
                'what now', 'come again?'
            ],
            'acknowledgment': [
                'got it', 'makes sense', 'i see', 'ah ok', 'word',
                'heard', 'copy that', 'roger'
            ]
        }
    
    def _generate_training_data(self):
        """Generate comprehensive training data for the ML model"""
        training_examples = [
            # Greetings
            ("Good morning, how are you doing today?", "hey, how are you?"),
            ("Greetings! I hope you are well.", "hey! hope you're good"),
            ("Good afternoon, how have you been?", "hey, how are you?"),
            ("Salutations, my friend!", "hey friend!"),
            ("Hello there, how are things?", "hey, what's up?"),
            ("What's going on with you?", "what's up?"),
            ("How's it going?", "how are you?"),
            
            # Modern casual conversations
            ("That's absolutely hilarious!", "lmaooo"),
            ("I can't believe what just happened", "i can't even"),
            ("Are you being serious right now?", "fr?"),
            ("That's really suspicious behavior", "that's sus"),
            ("I'm extremely exhausted today", "i'm dead tired"),
            ("This food is exceptionally delicious", "this food is bussin"),
            ("Your outfit looks amazing", "your fit is fire"),
            ("I'm genuinely shocked by this", "i'm shook"),
            ("That's incredibly cool", "that's sick"),
            ("I completely agree with your opinion", "facts"),
            
            # Internet expressions
            ("To be honest, I don't really care", "tbh idc"),
            ("I don't know what you mean", "idk wym"),
            ("Oh my god, that's incredible", "omg that's amazing"),
            ("Not going to lie, that's impressive", "ngl that's dope"),
            ("For your information, the meeting is cancelled", "fyi the meeting is cancelled"),
            ("By the way, did you see that?", "btw did you see that?"),
            ("In my opinion, that's not correct", "imo that's wrong"),
            ("What the hell is happening?", "wth is going on?"),
            
            # Formal inquiries to casual
            ("I would like to inquire about your status", "wanna know how you are"),
            ("How is everything going with you?", "how's it going?"),
            ("I trust you are doing well", "hope you're good"),
            ("What is your current situation?", "what's up with you?"),
            ("How have you been lately?", "how you been?"),
            
            # Professional language to casual
            ("I am writing to inform you about the meeting", "heads up about the meeting"),
            ("Please be advised that the event is cancelled", "heads up the event is cancelled"),
            ("I wanted to let you know about the update", "fyi there's an update"),
            ("Thank you for your assistance with this matter", "thanks for the help"),
            ("I appreciate your time and effort", "thanks"),
            ("At your earliest convenience", "when you can"),
            ("As soon as possible", "asap"),
            
            # Emotional expressions
            ("I am absolutely delighted to hear that", "i'm so happy to hear that"),
            ("That is truly magnificent news", "that's amazing news"),
            ("I am quite disappointed about this", "i'm bummed about this"),
            ("This is extraordinarily wonderful", "this is awesome"),
            ("I am tremendously excited", "i'm hyped"),
            ("I'm feeling melancholy today", "feeling sad today"),
            ("That's making me furious", "that's making me mad"),
            ("I'm concerned about the situation", "i'm worried about this"),
            
            # Modern reactions
            ("That's the best thing ever!", "that's fire!"),
            ("This situation is terrible", "this sucks"),
            ("I can't handle this anymore", "i'm done"),
            ("That's extremely weird", "that's sus af"),
            ("You're absolutely right about that", "you're right, facts"),
            ("I strongly disagree with that statement", "nah that's cap"),
            ("This is making me laugh so hard", "i'm dying lol"),
            ("That's genuinely surprising", "wait what"),
            
            # Conversational flow
            ("What are your thoughts on this matter?", "what do you think?"),
            ("Could you elaborate on that point?", "wdym?"),
            ("I need to depart now", "gotta go"),
            ("It was pleasant talking with you", "nice talking to you"),
            ("Until we meet again", "see ya"),
            ("Have a wonderful day", "have a good one"),
            ("Take care of yourself", "take care"),
            
            # Complex to simple with slang
            ("The weather is absolutely magnificent today", "the weather is amazing today"),
            ("This is an extraordinarily difficult problem", "this is a really hard problem"),
            ("I am tremendously grateful for your help", "thanks so much for your help"),
            ("That was an outstanding performance", "that was sick"),
            ("Your presentation was phenomenal", "your presentation was fire"),
            ("The party was incredibly enjoyable", "the party was lit"),
            
            # Internet culture
            ("That's creating a lot of drama", "that's creating tea"),
            ("Stop showing off so much", "stop flexing"),
            ("You're being overly dramatic", "you're being extra"),
            ("I understand what you mean", "i vibe with that"),
            ("That's completely true", "that's fax"),
            ("I'm leaving now", "i'm out"),
            ("Let's do this", "let's gooo"),
            ("That's embarrassing", "that's cringe"),
            
            # Modern youth expressions
            ("That music is really good", "that music slaps"),
            ("You look very stylish", "you got drip"),
            ("I'm throwing this away", "gonna yeet this"),
            ("Don't lie to me", "don't cap"),
            ("I'm telling the truth", "no cap"),
            ("That made me very angry", "that made me salty"),
            ("The food tastes amazing", "the food is bussin"),
            ("That's basic and unoriginal", "that's basic"),
        ]
        
        return training_examples
    
    def _initialize_model(self):
        """Initialize simple neural network model"""
        # Feature dimensions
        self.input_size = 100  # Word embedding size
        self.hidden_size = 50
        self.output_size = 100
        
        # Initialize weights randomly
        self.weights['W1'] = [[random.uniform(-1, 1) for _ in range(self.hidden_size)] 
                              for _ in range(self.input_size)]
        self.weights['W2'] = [[random.uniform(-1, 1) for _ in range(self.output_size)] 
                              for _ in range(self.hidden_size)]
        
        # Initialize biases
        self.bias['b1'] = [random.uniform(-1, 1) for _ in range(self.hidden_size)]
        self.bias['b2'] = [random.uniform(-1, 1) for _ in range(self.output_size)]
        
        # Word to index mapping
        self.word_to_index = {}
        self.index_to_word = {}
        self._build_vocabulary()
    
    def _build_vocabulary(self):
        """Build vocabulary from training data"""
        all_words = set()
        for complex_text, simple_text in self.training_data:
            words = self._tokenize(complex_text.lower()) + self._tokenize(simple_text.lower())
            all_words.update(words)
        
        # Add vocabulary mapping words
        for word in self.complex_to_simple.keys():
            all_words.add(word)
        for word in self.complex_to_simple.values():
            all_words.add(word)
        
        # Add internet slang
        for phrase in self.internet_slang.keys():
            all_words.update(phrase.split())
        for slang in self.internet_slang.values():
            all_words.update(slang.split())
        
        # Create word-to-index mapping
        for i, word in enumerate(sorted(all_words)):
            self.word_to_index[word] = i
            self.index_to_word[i] = word
    
    def _tokenize(self, text):
        """Simple tokenization"""
        # Remove punctuation and split
        text = re.sub(r'[^\w\s\']', '', text.lower())
        return text.split()
    
    def _text_to_vector(self, text):
        """Convert text to feature vector"""
        words = self._tokenize(text)
        vector = [0.0] * self.input_size
        
        for word in words:
            if word in self.word_to_index:
                idx = self.word_to_index[word] % self.input_size
                vector[idx] += 1.0
        
        # Normalize
        total = sum(vector)
        if total > 0:
            vector = [v / total for v in vector]
        
        return vector
    
    def _sigmoid(self, x):
        """Sigmoid activation function"""
        return 1 / (1 + math.exp(-max(-500, min(500, x))))
    
    def _forward_pass(self, input_vector):
        """Forward pass through the network"""
        # Hidden layer
        hidden = []
        for i in range(self.hidden_size):
            weighted_sum = sum(input_vector[j] * self.weights['W1'][j][i] 
                             for j in range(self.input_size)) + self.bias['b1'][i]
            hidden.append(self._sigmoid(weighted_sum))
        
        # Output layer
        output = []
        for i in range(self.output_size):
            weighted_sum = sum(hidden[j] * self.weights['W2'][j][i] 
                             for j in range(self.hidden_size)) + self.bias['b2'][i]
            output.append(self._sigmoid(weighted_sum))
        
        return hidden, output
    
    def _train_model(self):
        """Train the neural network model silently"""
        epochs = 30  # Balanced for performance
        
        for epoch in range(epochs):
            total_error = 0
            
            for complex_text, simple_text in self.training_data:
                try:
                    # Convert to vectors
                    input_vec = self._text_to_vector(complex_text)
                    target_vec = self._text_to_vector(simple_text)
                    
                    # Forward pass
                    hidden, output = self._forward_pass(input_vec)
                    
                    # Calculate error (simplified)
                    if len(output) == len(target_vec):
                        error = sum((target_vec[i] - output[i]) ** 2 for i in range(len(output)))
                        total_error += error
                    
                    # Simple weight update (simplified backpropagation)
                    for i in range(min(self.output_size, len(output))):
                        for j in range(self.hidden_size):
                            if i < len(target_vec):
                                gradient = 2 * (output[i] - target_vec[i]) * output[i] * (1 - output[i])
                                self.weights['W2'][j][i] -= self.learning_rate * gradient * hidden[j]
                
                except Exception:
                    # Skip problematic training examples
                    continue
    
    def _apply_vocabulary_mapping(self, text):
        """Apply direct vocabulary mappings"""
        words = self._tokenize(text)
        result = []
        
        for word in words:
            if word in self.complex_to_simple:
                result.append(self.complex_to_simple[word])
            else:
                result.append(word)
        
        return ' '.join(result)
    
    def _apply_phrase_mappings(self, text):
        """Apply phrase-level transformations"""
        result = text.lower()
        
        for pattern, replacement in self.phrase_mappings.items():
            result = re.sub(pattern, replacement, result, flags=re.IGNORECASE)
        
        return result
    
    def _classify_intent(self, text):
        """Classify the intent of the input text"""
        text_lower = text.lower()
        
        # Check for greetings
        for pattern in self.greeting_patterns['formal_greetings']:
            if re.search(pattern, text_lower):
                return 'greeting'
        
        for pattern in self.greeting_patterns['casual_greetings']:
            if re.search(pattern, text_lower):
                return 'casual_greeting'
        
        # Check for wellbeing questions
        for pattern in self.greeting_patterns['questions_about_wellbeing']:
            if re.search(pattern, text_lower):
                return 'wellbeing_question'
        
        # Check for thanks
        for pattern in self.greeting_patterns['thanks_expressions']:
            if re.search(pattern, text_lower):
                return 'thanks'
        
        # Check for questions
        if '?' in text or any(q in text_lower for q in ['what', 'who', 'where', 'when', 'why', 'how']):
            return 'question'
        
        # Check for excitement
        if any(word in text_lower for word in ['amazing', 'awesome', 'incredible', 'fantastic', '!']):
            return 'excitement'
        
        # Check for agreement/disagreement
        if any(word in text_lower for word in ['agree', 'right', 'correct', 'true']):
            return 'agreement'
        if any(word in text_lower for word in ['disagree', 'wrong', 'incorrect', 'false']):
            return 'disagreement'
        
        return 'general'
    
    def _apply_intent_based_simplification(self, text, intent):
        """Apply simplification based on classified intent"""
        if intent == 'greeting':
            return random.choice(['hey', 'hi', 'hello', 'yo'])
        elif intent == 'casual_greeting':
            return text.lower()
        elif intent == 'wellbeing_question':
            return random.choice(['how are you?', 'what\'s up?', 'how\'s it going?', 'you good?'])
        elif intent == 'thanks':
            return random.choice(['thanks', 'thx', 'ty', 'thanks!'])
        elif intent == 'excitement':
            if 'amazing' in text.lower() or 'incredible' in text.lower():
                return random.choice(['that\'s sick!', 'that\'s fire!', 'omg yes!', 'let\'s gooo!'])
        elif intent == 'agreement':
            return random.choice(self.conversational_responses['agreement'])
        elif intent == 'disagreement':
            return random.choice(self.conversational_responses['disagreement'])
        elif intent == 'question' and 'what' in text.lower() and 'mean' in text.lower():
            return 'wdym?'
        
        return text
    
    def _ml_simplify(self, text):
        """Use ML model to simplify text"""
        input_vec = self._text_to_vector(text)
        hidden, output = self._forward_pass(input_vec)
        
        # Find most activated words in output
        top_indices = sorted(range(len(output)), key=lambda i: output[i], reverse=True)[:5]
        
        result_words = []
        for idx in top_indices:
            if idx < len(self.index_to_word) and output[idx] > 0.5:
                result_words.append(self.index_to_word[idx])
        
        return ' '.join(result_words) if result_words else text
    
    def simplify(self, text):
        """Main simplification function combining all techniques"""
        if not text or not text.strip():
            return text
        
        # Step 1: Classify intent
        intent = self._classify_intent(text)
        
        # Step 2: Apply intent-based simplification for common patterns
        intent_result = self._apply_intent_based_simplification(text, intent)
        if intent_result != text:
            return intent_result
        
        # Step 3: Apply phrase mappings
        phrase_result = self._apply_phrase_mappings(text)
        
        # Step 4: Apply vocabulary mappings
        vocab_result = self._apply_vocabulary_mapping(phrase_result)
        
        # Step 5: Apply ML model for additional simplification
        ml_result = self._ml_simplify(vocab_result)
        
        # Step 6: Post-processing cleanup
        final_result = self._post_process(ml_result if ml_result.strip() else vocab_result)
        
        return final_result
    
    def _post_process(self, text):
        """Post-process the simplified text"""
        # Remove extra spaces
        text = re.sub(r'\s+', ' ', text.strip())
        
        # Handle question marks
        question_words = ['how', 'what', 'why', 'when', 'where', 'who', 'wyd', 'wdym', 'fr']
        if any(word in text.lower() for word in question_words):
            if not text.endswith('?'):
                text += '?'
        
        # Don't capitalize certain internet slang
        no_caps = ['lol', 'lmao', 'omg', 'wtf', 'wth', 'idk', 'idc', 'ngl', 'tbh', 
                   'imo', 'imho', 'fyi', 'asap', 'rn', 'af', 'btw', 'smh', 'rofl']
        
        if text and not any(text.lower().startswith(word) for word in no_caps):
            text = text[0].upper() + text[1:]
        
        return text
    
    def get_conversational_response(self, text):
        """Generate a conversational response based on the input"""
        simplified = self.simplify(text)
        intent = self._classify_intent(text)
        
        # Add some conversational flair based on intent
        if intent == 'wellbeing_question':
            responses = [
                "i'm good! " + simplified,
                "doing great! " + simplified,
                "all good here! " + simplified,
                "vibing! " + simplified
            ]
            return random.choice(responses)
        
        return simplified
    
    def batch_simplify(self, texts):
        """Simplify multiple texts at once"""
        return [self.simplify(text) for text in texts]
    
    def add_custom_mapping(self, complex_word, simple_word):
        """Add custom vocabulary mapping"""
        self.complex_to_simple[complex_word.lower()] = simple_word.lower()
    
    def get_simplification_confidence(self, original, simplified):
        """Calculate confidence score for simplification"""
        original_words = set(self._tokenize(original.lower()))
        simplified_words = set(self._tokenize(simplified.lower()))
        
        # Calculate word reduction ratio
        reduction_ratio = 1 - (len(simplified_words) / max(len(original_words), 1))
        
        # Calculate known word mapping ratio
        mapped_words = sum(1 for word in original_words if word in self.complex_to_simple)
        mapping_ratio = mapped_words / max(len(original_words), 1)
        
        # Check for slang usage
        slang_words = sum(1 for word in simplified_words 
                         if word in self.internet_slang.values())
        slang_ratio = slang_words / max(len(simplified_words), 1)
        
        # Combined confidence score
        confidence = (reduction_ratio * 0.3) + (mapping_ratio * 0.4) + (slang_ratio * 0.3)
        return min(1.0, max(0.0, confidence))

# Example usage and testing
def test_text_simplifier():
    """Run the simplifier over a battery of samples and print the results."""
    simplifier = TextSimplifier()

    test_cases = [
        # Formal phrasing
        "Good morning, how are you doing today?",
        "I would like to inquire about your current status",
        "Thank you very much for your assistance",

        # Modern slang
        "That's absolutely hilarious!",
        "I can't believe what just happened",
        "To be honest, I don't really care about that",
        "Oh my god, that's incredible!",
        "Not going to lie, that was impressive",

        # Internet culture
        "That outfit looks amazing on you",
        "This food is exceptionally delicious",
        "Are you being serious right now?",
        "That's really suspicious behavior",
        "I'm extremely exhausted today",

        # Conversational
        "What are your thoughts on this matter?",
        "I strongly disagree with that statement",
        "That's the best thing I've ever seen!",
        "I'm feeling melancholy today",
        "Your presentation was phenomenal",

        # Mixed formal and casual
        "By the way, the meeting is at 3pm",
        "For your information, the project is complete",
        "I appreciate your prompt response",
        "The weather is absolutely magnificent today",
        "That party was incredibly enjoyable"
    ]

    print("šŸ”„ === Enhanced Text Simplification Results === šŸ”„\n")

    for i, text in enumerate(test_cases, 1):
        simplified = simplifier.simplify(text)
        confidence = simplifier.get_simplification_confidence(text, simplified)

        # Pick a badge that reflects the confidence band.
        emoji = "šŸ’Æ" if confidence >= 0.5 else "✨" if confidence >= 0.3 else "šŸ‘"

        print(f"{i:2d}. Original:   {text}")
        print(f"    Simplified: {simplified} {emoji}")
        print(f"    Confidence: {confidence:.2f}")
        print()

def demo_modern_conversations():
    """Show how the simplifier responds to a scripted modern conversation."""
    simplifier = TextSimplifier()

    conversations = [
        ("User", "Hey there! How's everything going with you today?"),
        ("User", "I just saw the most incredible thing ever!"),
        ("User", "To be honest, I'm not feeling great about this situation"),
        ("User", "Oh my goodness, did you see what happened?"),
        ("User", "That's extremely suspicious if you ask me"),
        ("User", "I'm absolutely exhausted from all this work"),
        ("User", "Your new hairstyle looks phenomenal!"),
        ("User", "Not going to lie, that was pretty impressive"),
        ("User", "What do you think about the new update?"),
        ("User", "I completely disagree with that opinion"),
    ]

    print("\nšŸ’¬ === Modern Conversation Demo === šŸ’¬\n")

    for speaker, text in conversations:
        # Echo the user line, then the bot's simplified reply.
        print(f"{speaker}: {text}")
        print(f"Bot: {simplifier.get_conversational_response(text)}")
        print()

def interactive_simplifier():
    """Interactive REPL: read user text, print the simplified reply.

    Exits on 'quit'/'exit'/'q' or Ctrl-C. Keeps a history so a small
    stats line can be shown every five exchanges.

    Fix: the previous version computed a confidence score for every
    input via get_simplification_confidence() but never used it — that
    dead per-iteration work has been removed.
    """
    simplifier = TextSimplifier()

    print("šŸš€ === Interactive Modern Text Simplifier === šŸš€")
    print("šŸ’¬ Type any text and watch it get simplified with modern slang!")
    print("šŸ”„ Try formal language, emotions, questions, reactions...")
    print("āŒ Type 'quit', 'exit', or 'q' to stop")
    print("=" * 60)

    example_suggestions = [
        "That's absolutely incredible!",
        "I'm extremely tired from work today",
        "Oh my god, did you see that?",
        "To be honest, I don't understand",
        "Your outfit looks amazing!",
        "Not going to lie, that was awesome",
        "I completely agree with your opinion",
        "Are you being serious right now?"
    ]

    print("\nšŸ”„ Try these examples:")
    for i, example in enumerate(example_suggestions[:4], 1):
        print(f"{i}. {example}")
    print()

    conversation_history = []

    while True:
        try:
            user_input = input("šŸ‘¤ You: ").strip()

            if user_input.lower() in ['quit', 'exit', 'q']:
                print("āœŒļø Later! Thanks for chatting!")
                break

            if not user_input:
                print("šŸ˜… Say something...")
                continue

            # Process and respond.
            response = simplifier.get_conversational_response(user_input)
            print(f"šŸ¤– Bot: {response}")

            # Track the exchange for periodic stats.
            conversation_history.append((user_input, response))
            if len(conversation_history) % 5 == 0:
                print(f"\nšŸ“Š Conversation streak: {len(conversation_history)} exchanges!")

            print()

        except KeyboardInterrupt:
            print("\n\nāœŒļø Peace out! Thanks for the chat!")
            break
        except Exception as e:
            # Keep the REPL alive on unexpected errors; show what went wrong.
            print(f"😬 Oops: {e}")
            print("Try again...")
            continue

if __name__ == "__main__":
    # Entry point: banner, scripted demos, then the interactive REPL.
    print("šŸš€ Modern Text Simplification System Ready!")
    print("=" * 50)
    
    # Run tests first to show capabilities
    test_text_simplifier()
    
    # Then show conversation demo
    demo_modern_conversations()
    
    # Start interactive mode
    print("\n" + "=" * 60)
    interactive_simplifier()