import re
import random
from typing import List, Dict, Optional

import jieba


class EnhancedArticleRewriter:
    """
    Enhanced intelligent article rewriter.

    Produces more natural sentence variation, in line with how people
    actually write.
    """

    def __init__(self):
        # Punctuation definitions
        self.sentence_endings = ['。', '!', '?', '…']
        self.pause_marks = [',', ';', ':', '、']

        # Sentence-length bands in characters (mimics human writing habits)
        self.sentence_length_distribution = {
            'short': (5, 15),        # short sentences
            'medium': (16, 30),      # medium sentences
            'long': (31, 50),        # long sentences
            'extra_long': (51, 80)   # extra-long sentences
        }

        # Connector vocabulary, grouped by rhetorical function
        self.connectors = {
            'sequence': ['随后', '接着', '然后', '紧接着', '继而', '进而'],
            'addition': ['并且', '同时', '此外', '另外', '再者', '况且', '而且'],
            'contrast': ['但是', '然而', '不过', '可是', '却', '反而', '相反'],
            'cause': ['因为', '由于', '因此', '所以', '故而', '从而'],
            'condition': ['如果', '假如', '倘若', '若是', '要是'],
            'concession': ['虽然', '尽管', '即使', '纵然', '固然'],
            'summary': ['总之', '综上', '总的来说', '概括地说', '简言之'],
            'example': ['比如', '例如', '譬如', '好比', '正如'],
            'emphasis': ['特别是', '尤其是', '更重要的是', '值得注意的是'],
            'explanation': ['也就是说', '换句话说', '具体来说', '准确地说']
        }

        # Sentence-pattern templates
        self.sentence_patterns = {
            'statement': ['{}'],  # plain statement
            'emphasis_front': ['值得注意的是,{}', '需要强调的是,{}', '重要的是,{}'],
            'emphasis_back': ['{},这一点尤为重要', '{},这是关键所在'],
            'question_rhetorical': ['难道不是{}吗?', '{},不是吗?'],
            'parallel': ['不仅{},而且{}', '既{},又{}', '一方面{},另一方面{}'],
            'progressive': ['先是{},然后{}', '从{}到{}', '由{}发展到{}']
        }

        # Synonym / near-synonym substitution table
        self.synonyms = {
            '发展': ['演进', '进步', '演变', '发展', '进化', '提升', '推进'],
            '改变': ['变化', '转变', '改变', '变革', '转换', '调整', '革新'],
            '重要': ['关键', '重要', '核心', '主要', '根本', '要紧', '关键性'],
            '影响': ['作用', '影响', '效应', '冲击', '波及', '涉及'],
            '提高': ['提升', '增强', '改善', '优化', '加强', '增进'],
            '显示': ['表明', '显示', '说明', '揭示', '体现', '反映', '展现'],
            '通过': ['利用', '运用', '借助', '凭借', '依靠', '经由'],
            '实现': ['达成', '实现', '完成', '达到', '做到', '落实'],
            '问题': ['难题', '问题', '挑战', '困难', '障碍', '瓶颈'],
            '方法': ['方式', '手段', '途径', '办法', '策略', '措施'],
            '需要': ['需要', '要求', '必须', '应该', '亟需', '急需'],
            '能够': ['能够', '可以', '能', '可', '得以', '足以'],
            '非常': ['十分', '相当', '特别', '格外', '极其', '异常', '颇为'],
            '很多': ['许多', '大量', '众多', '诸多', '不少', '大批'],
            '所有': ['全部', '一切', '所有', '整个', '全体', '各个'],
            '已经': ['已', '已经', '业已', '早已', '都已'],
            '正在': ['正', '正在', '在', '正处于', '目前正'],
            '越来越': ['日益', '愈发', '愈加', '更加', '日渐', '渐渐'],
            '不断': ['持续', '不断', '连续', '陆续', '继续', '频繁'],
            '各种': ['各类', '各种', '多种', '种种', '诸般', '多样'],
        }

    def _get_random_sentence_length_type(self) -> str:
        """Randomly pick a sentence-length category from a weighted distribution."""
        # Mimic human writing: medium sentences dominate, short and long
        # sentences are less common, extra-long sentences are rarest.
        weights = {'short': 25, 'medium': 40, 'long': 25, 'extra_long': 10}
        types = list(weights.keys())
        probs = [weights[t] / 100 for t in types]
        return random.choices(types, weights=probs)[0]

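    # Note: random.choices treats weights as relative values, so passing the
    # raw integer weights without dividing by 100 would behave identically.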
    def _smart_split_merge_sentences(self, sentences: List[str]) -> List[str]:
        """Split and merge sentences to create a natural long/short rhythm."""
        if not sentences:
            return sentences

        result = []
        i = 0

        while i < len(sentences):
            # Draw a target length category for this position
            target_type = self._get_random_sentence_length_type()
            min_len, max_len = self.sentence_length_distribution[target_type]

            current_sentence = sentences[i].strip()
            current_len = len(current_sentence)

            # Too long for the target: try to split it
            if current_len > max_len:
                split_sentences = self._split_sentence_naturally(current_sentence, max_len)
                result.extend(split_sentences)

            # Too short: occasionally merge it with the next sentence
            elif current_len < min_len and i + 1 < len(sentences):
                # Merge short sentences with 30% probability
                if random.random() < 0.3:
                    merged = self._merge_sentences(current_sentence, sentences[i + 1])
                    result.append(merged)
                    i += 1  # skip the sentence we just consumed
                else:
                    result.append(current_sentence)

            # Length is acceptable: keep the sentence as-is
            else:
                result.append(current_sentence)

            i += 1

        return result

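    # Note: each sentence is compared against a freshly drawn target length,
    # so the same input can produce different split/merge decisions per run.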
    def _split_sentence_naturally(self, sentence: str, max_length: int) -> List[str]:
        """Split an over-long sentence at natural pause points."""
        if len(sentence) <= max_length:
            return [sentence]

        # Remember the sentence-ending punctuation, if any
        ending = ''
        for mark in self.sentence_endings:
            if sentence.endswith(mark):
                ending = mark
                sentence = sentence[:-len(mark)]
                break

        # Prefer splitting at commas
        parts = []
        if ',' in sentence:
            segments = sentence.split(',')
            current = ""

            for segment in segments:
                if not current:
                    current = segment
                elif len(current + ',' + segment) <= max_length:
                    current += ',' + segment
                else:
                    # Close the accumulated chunk as a complete sentence
                    if random.random() < 0.7:  # 70%: end with a full stop
                        parts.append(current + '。')
                    else:  # 30%: pick an alternative ending (occasionally none)
                        parts.append(current + random.choice(['。', '!', '']))
                    current = segment

            # Flush the final chunk, restoring the original ending mark
            if current:
                parts.append(current + ending)
        else:
            # No comma to split at: keep the sentence whole
            parts = [sentence + ending]

        return parts if parts else [sentence + ending]

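    # Illustrative sketch (hypothetical input; output depends on the random
    # end-mark choice above):
    #   _split_sentence_naturally('甲甲甲,乙乙乙,丙丙丙。', max_length=8)
    #   usually returns ['甲甲甲,乙乙乙。', '丙丙丙。']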
    def _merge_sentences(self, sent1: str, sent2: str) -> str:
        """Merge two sentences into one."""
        # Drop the ending punctuation of the first sentence
        for mark in self.sentence_endings:
            if sent1.endswith(mark):
                sent1 = sent1[:-len(mark)]
                break

        # Pick how to join the two sentences
        merge_type = random.choice(['comma', 'connector', 'semicolon'])

        if merge_type == 'comma':
            return sent1 + ',' + sent2
        elif merge_type == 'connector':
            # Pick a random connector category, then a connector from it
            conn_type = random.choice(list(self.connectors.keys()))
            connector = random.choice(self.connectors[conn_type])
            return sent1 + ',' + connector + sent2
        else:  # semicolon
            return sent1 + ';' + sent2

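    # Illustrative sketch (hypothetical inputs): for the 'comma' branch,
    #   _merge_sentences('天气很好。', '我们出门了。')
    # returns '天气很好,我们出门了。'; the 'connector' branch inserts a randomly
    # chosen connector, which may not always fit the sentence semantics.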
    def _replace_synonyms(self, text: str, intensity: float) -> str:
        """Replace words with synonyms; `intensity` is the per-word probability."""
        words = list(jieba.cut(text))
        result = []

        for word in words:
            if word in self.synonyms and random.random() < intensity:
                # Pick a synonym, avoiding the original word itself
                alternatives = [w for w in self.synonyms[word] if w != word]
                if alternatives:
                    result.append(random.choice(alternatives))
                else:
                    result.append(word)
            else:
                result.append(word)

        return ''.join(result)

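    # Illustrative sketch (hypothetical input): with intensity=1.0,
    #   _replace_synonyms('发展需要方法', 1.0)
    # might return '演进亟需策略', jieba tokenization and random choice permitting.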
    def _adjust_sentence_structure(self, sentence: str, intensity: float) -> str:
        """Adjust sentence structure so the result reads more naturally."""
        if random.random() > intensity:
            return sentence

        # Remember the sentence-ending punctuation, if any
        ending = ''
        for mark in self.sentence_endings:
            if sentence.endswith(mark):
                ending = mark
                sentence = sentence[:-len(mark)]
                break

        # Pick an adjustment strategy at random
        adjust_type = random.choice(['reorder', 'add_emphasis', 'change_pattern'])

        if adjust_type == 'reorder' and ',' in sentence:
            # Reorder the clauses
            parts = sentence.split(',')
            if len(parts) >= 2:
                # Not a full shuffle: make a small, structured adjustment
                if len(parts) == 2:
                    # Two clauses: simply swap them
                    sentence = parts[1] + ',' + parts[0]
                else:
                    # Several clauses: move the middle one to the front or back
                    mid_idx = len(parts) // 2
                    if random.random() < 0.5:
                        # Move the middle clause to the front
                        parts = [parts[mid_idx]] + parts[:mid_idx] + parts[mid_idx + 1:]
                    else:
                        # Move the middle clause to the back
                        parts = parts[:mid_idx] + parts[mid_idx + 1:] + [parts[mid_idx]]
                    sentence = ','.join(parts)

        elif adjust_type == 'add_emphasis':
            # Wrap the sentence in an emphasis template
            if random.random() < 0.3:
                pattern = random.choice(self.sentence_patterns['emphasis_front'])
                sentence = pattern.format(sentence)
            elif random.random() < 0.3:
                pattern = random.choice(self.sentence_patterns['emphasis_back'])
                sentence = pattern.format(sentence)

        elif adjust_type == 'change_pattern':
            # Recast the sentence with a parallel or progressive pattern
            if ',' in sentence and random.random() < 0.4:
                parts = sentence.split(',', 1)
                if len(parts) == 2:
                    if random.random() < 0.5:
                        sentence = f"不仅{parts[0]},而且{parts[1]}"
                    else:
                        sentence = f"{parts[0]},进而{parts[1]}"

        return sentence + ending

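    # Illustrative sketch (hypothetical input): with the 'reorder' branch,
    #   _adjust_sentence_structure('今天天气很好,我们去公园。', 1.0)
    # can return '我们去公园,今天天气很好。'; the other branches wrap the
    # sentence in an emphasis template or a parallel/progressive pattern.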
    def _add_natural_variations(self, sentence: str, intensity: float) -> str:
        """Add small, natural-sounding variations to the sentence."""
        if random.random() > intensity:
            return sentence

        # 15% chance: insert a degree adverb before a common verb
        if random.random() < 0.15:
            adverb = random.choice(['更', '更加', '尤其', '特别', '格外'])
            # Naively insert the adverb before the first of "是"/"有"/"能" etc.
            for verb in ['是', '有', '能', '会', '要']:
                if verb in sentence:
                    sentence = sentence.replace(verb, f"{adverb}{verb}", 1)
                    break

        # 20% chance: prepend a transition word. Doing this after the adverb
        # step keeps both variations when they happen to fire together.
        if random.random() < 0.2:
            transition = random.choice(['其实', '事实上', '实际上', '确实', '显然'])
            sentence = f"{transition},{sentence}"

        return sentence

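    # Illustrative sketch (hypothetical input): with both branches firing,
    # '这是一个机会。' can become '其实,这更是一个机会。'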
    def rewrite(self, text: str, config: Optional[Dict] = None) -> str:
        """
        Main entry point: rewrite an article.

        Args:
            text: input article
            config: configuration dict
                - intensity: rewrite strength, 0.0-1.0
                - preserve_meaning: whether to preserve the original meaning
                - natural_flow: whether to keep the text flowing naturally
                - vary_sentence_length: whether to vary sentence lengths
        """
        if config is None:
            config = {}

        # Fill in defaults
        config.setdefault('intensity', 0.6)
        config.setdefault('preserve_meaning', True)
        config.setdefault('natural_flow', True)
        config.setdefault('vary_sentence_length', True)

        intensity = config['intensity']

        # Process paragraph by paragraph
        paragraphs = [p.strip() for p in text.split('\n') if p.strip()]
        result_paragraphs = []

        for para in paragraphs:
            # Split the paragraph into sentences
            sentences = self._split_sentences(para)

            # 1. Adjust the long/short sentence rhythm first
            if config['vary_sentence_length']:
                sentences = self._smart_split_merge_sentences(sentences)

            # 2. Process each sentence
            processed_sentences = []
            for sent in sentences:
                # Synonym substitution
                sent = self._replace_synonyms(sent, intensity * 0.5)

                # Sentence-structure adjustment
                sent = self._adjust_sentence_structure(sent, intensity * 0.7)

                # Small natural variations
                sent = self._add_natural_variations(sent, intensity * 0.3)

                processed_sentences.append(sent)

            # 3. Paragraph-level reshuffle: occasionally swap two adjacent
            #    sentences (probability scales with intensity, at most 0.2)
            if len(processed_sentences) > 3 and random.random() < intensity * 0.2:
                idx = random.randint(0, len(processed_sentences) - 2)
                processed_sentences[idx], processed_sentences[idx + 1] = \
                    processed_sentences[idx + 1], processed_sentences[idx]

            result_paragraphs.append(''.join(processed_sentences))

        return '\n\n'.join(result_paragraphs)

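    # Minimal usage sketch (hypothetical text; output varies from run to run):
    #   rewriter = EnhancedArticleRewriter()
    #   rewritten = rewriter.rewrite('科技的发展改变了很多行业。', {'intensity': 0.6})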
    def _split_sentences(self, text: str) -> List[str]:
        """Improved sentence splitting."""
        # Handle multiple kinds of sentence-ending punctuation
        pattern = '([。!?…]+)'
        parts = re.split(pattern, text)
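        # Because the pattern is a capture group, re.split returns
        # [text, ending, text, ending, ...]; the loop below re-attaches each
        # ending mark to the text that precedes it.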

        sentences = []
        for i in range(0, len(parts) - 1, 2):
            if parts[i].strip():
                sentences.append(parts[i] + parts[i + 1])

        # Handle a trailing fragment that has no ending punctuation
        if len(parts) % 2 == 1 and parts[-1].strip():
            sentences.append(parts[-1] + '。')  # append a default full stop

        return sentences


def demo():
    """Usage example."""
    sample_text = """
最近,晓蕾又上热搜了!

咋回事呢?原来,她和老公刘剑一起开了直播带货的副业。但特意声明:她早就离开了上海电视台的编制,也不拿电视台的工资。换句话说,现在卖东西,完全是私营业态。

这事儿一下子引爆了大家的八卦魂。毕竟,明星主持扎堆直播间,也不算新鲜事。但还是挺多人纳闷:这些当年的 "话筒头牌",是不是集体选择摆烂了?

其实,晓蕾和刘剑干脆落落大方,在直播间直接回应了这点。俩人意思很明确:“我们不是来拉低职业口碑的”。而且还耐心解释了自己转行的理由。
曾经的大佬,变成了烟火气

说到晓蕾,不了解点她背景都不好意思讨论人家。当年上视新闻部的 "当家花旦",光学历和气质,足够秒杀隔壁主持圈的八条街。而刘剑,早年可是 "台柱子",播音腔精致到令人耳膜怀孕。照理来说,这样一对,在编制铁饭碗里躺平一辈子没毛病。

可人家偏不。

晓蕾说过这样一句话:“其实,我就是个普通人。” 真的那么普通吗?她不这么说,没人敢忘了她的标杆履历啊!她离开台里后,居然一头扎进了童语言教育这个赛道,一干就是十年,让机构做到了业内小圈子的爆款水准。

而这次直播,打的商品也不混乱,主打性价比和实用属性,晓蕾每件商品还得亲测过。如果你觉得她自吹自擂,建议去看看她直播间的粉丝评论。大家的意思是:晓蕾推品 = ·9放心买。
刘剑这枚 “前一哥”,更狠!

说晓蕾牛,别忘了,刘剑十年前也上演了一场 “豪赌”。那个年代,辞去电视台稳定工作,和 “打水漂” 差不多。

可是刘剑敢把梭全下,为啥?因为他看中了播音考生和辅导课程的市场,那时还没有多少人扎堆干这块,他觉得这是个机会。

果然,就这么辞了职,工作的腰板从跟组织吃工资,摇身变成了名副其实的事业单位 —— 自己家老板。虽然后来也是磕磕绊绊,但终究从试验田里掘出了一片肥沃地。
主持人的 “下海”,是换方向走

有人觉得,曾经的新闻人、主持人 “跑去带货”,肯定是混不下去了。你要放在十年前,这种联想不稀奇,可现在不一样了。大环境变了,传统媒体是真的在互联网时代被打败得找不到调。

原来电视频道的观众,现在早转移到手机端,看知乎、刷短视频,甚至晚上蹲个带货直播会。你说新闻节目的高冷主播,现在换脸做带货主持,是不是 “落魄”?未必。

晓蕾夫妻这一波,实际上是转型很成功的范例。不管带啥网红货,他们俩把品质第一的逻辑摆明白了。这样的主播,不止卖产品,更卖信誉,靠着时间积攒了观众的信任。
直播间哪门子 LOW?明明是主战场

网友说得有趣:“谁嫌直播带货 LOW,谁就输定了。” 道理没跑儿,移动互联网成了咱生活重心,生意也跟着迁移。这是明显趋势,看不懂的还真不想赚钱了。

而且,做直播一点不轻松。站几个小时口播、随时照顾弹幕情绪,这比坐着念提词器辛苦多了。像晓蕾和刘剑这样的 “摸鱼资历”,能转过身来赚饭钱,这不是 “混”,是 “拼” 啊。

别说传统意义的职业崇拜消失殆尽,你觉得稳如狗的岗位,说散架就散。老一辈金饭碗情结,对于下一代新创别说香,而是种被淘汰跑赢速度内心创新积极点。

我不是电视台员工了,早就离职 10 年了。
    """

    rewriter = EnhancedArticleRewriter()

    print("=" * 60)
    print("原文:")
    print("=" * 60)
    print(sample_text)

    # Try several rewrite intensities
    for intensity in [0.3, 0.6, 0.9]:
        print(f"\n{'=' * 60}")
        print(f"改写强度: {intensity}")
        print("=" * 60)

        config = {
            'intensity': intensity,
            'preserve_meaning': True,
            'natural_flow': True,
            'vary_sentence_length': True
        }

        result = rewriter.rewrite(sample_text, config)
        print(result)

        # Report the sentence-length distribution of the rewritten text
        sentences = re.split('[。!?…]+', result)
        lengths = [len(s) for s in sentences if s.strip()]
        if lengths:
            print(f"\n句子长度分布: 最短={min(lengths)}, 最长={max(lengths)}, 平均={sum(lengths) / len(lengths):.1f}")
            print(f"句子数量: {len(lengths)}")


if __name__ == '__main__':
    # Note: the jieba library must be installed first:
    # pip install jieba
    demo()