
Text similarity detection in Java with adjustable thresholds

Basic approach

* SimHash + Hamming distance: suited to long texts; fast to compute, noise-tolerant, and light on memory (a distance-to-similarity sketch follows this list)

* HanLP segmentation: a professional Chinese word segmenter with support for custom dictionaries and stop words

* Guava: provides an efficient Bloom filter implementation, used here for fast deduplication

* Parallel processing: the Java 8 Stream API accelerates large-scale text processing

* Dynamic thresholds: the similarity strategy adjusts automatically to text length, handling both short and long texts
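
To make the SimHash comparison concrete, here is a minimal standalone sketch of the distance-to-similarity mapping that the main class below uses; the two fingerprints are made-up values, not real SimHash output:

public class HammingDemo {
    public static void main(String[] args) {
        // Illustrative 64-bit fingerprints (not real SimHash output)
        long fp1 = 0b1011_0110L;
        long fp2 = 0b1001_0111L;

        // XOR leaves a 1 wherever the fingerprints disagree; bitCount counts them
        int hamming = Long.bitCount(fp1 ^ fp2);

        // Map the distance into [0, 1]: identical fingerprints score 1.0
        double similarity = 1.0 - hamming / 64.0;
        System.out.println(hamming + " differing bits, similarity = " + similarity);
    }
}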

<!-- Concurrency utilities / Bloom filter -->
<dependency>
    <groupId>com.google.guava</groupId>
    <artifactId>guava</artifactId>
    <version>30.1-jre</version> <!-- the 30.x line supports JDK 1.8 -->
</dependency>
<!-- SimHash support -->
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-text</artifactId>
    <version>1.10.0</version>
</dependency>
<!-- Edit distance algorithms -->
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-lang3</artifactId>
    <version>3.12.0</version>
</dependency>
<!-- Chinese word segmentation -->
<dependency>
    <groupId>com.hankcs</groupId>
    <artifactId>hanlp</artifactId>
    <version>portable-1.8.4</version> <!-- latest stable portable release -->
</dependency>
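
The implementation below leans on two HanLP calls: HanLP.segment for tokenization and the Term.nature field for the part-of-speech tag. A minimal sketch of that usage (the sample sentence is arbitrary):

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import java.util.List;

public class SegmentDemo {
    public static void main(String[] args) {
        // Segment a sentence and print each token with its POS tag;
        // tags starting with "n" are nouns, "v" verbs
        List<Term> terms = HanLP.segment("自然语言处理是人工智能的重要分支");
        for (Term term : terms) {
            System.out.println(term.word + "/" + term.nature);
        }
    }
}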


import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

/**
 * SimHash + Hamming distance: suited to long texts; fast to compute, noise-tolerant, low memory footprint.
 * HanLP segmentation: professional Chinese word segmenter, supports custom dictionaries and stop words.
 * Guava: efficient Bloom filter implementation, used for fast deduplication.
 * Parallel processing: the Java 8 Stream API speeds up large-scale text processing.
 * Dynamic thresholds: the similarity strategy adjusts to text length, handling both short and long texts.
 *
 * Design highlights:
 * Multi-level similarity computation:
 *   - Hamming distance: fast first-pass filtering
 *   - Jaccard coefficient: better matching for short texts
 *   - Cosine similarity: precise semantic comparison for long texts
 * Efficient processing:
 *   - Bloom filter: fast deduplication (10,000 elements need only ~23 KB of memory)
 *   - Parallel computation: spreads the work across CPU cores
 *   - SimHash compression: condenses each text into a 64-bit fingerprint
 * Chinese-specific optimizations:
 *   - Noun-first weighting: focuses on the core semantic units
 *   - Part-of-speech filtering: keeps content words such as nouns and verbs
 *   - N-gram features: capture local word-order information
 */
public class ChineseTextSimilarityFinder {
    // Configuration
    private static final int SIMHASH_BITS = 64;           // SimHash bit width
    private static final double BASE_SIMILARITY = 0.7;    // base similarity threshold
    private static final int MIN_TEXT_LENGTH = 20;        // short-text length threshold
    private static final int BLOOM_EXPECTED_SIZE = 2000;  // expected Bloom filter insertions
    private static final double BLOOM_FPP = 0.001;        // Bloom filter false-positive rate

    public static void main(String[] args) {
        // Sample data
        List<String> texts = Arrays.asList(
                "自然语言处理是人工智能的重要分支,致力于让计算机理解人类语言。",
                "自然语言处理是人工智能的重要分支,致力于让计算机理解人类语言1。",
                "自然语言处理作为人工智能的关键领域,专注于计算机对人类语言的理解。",
                "机器学习是人工智能的核心技术,通过算法让计算机从数据中学习规律。"
                        + "监督学习和无监督学习是两大主要范式,广泛应用于各个领域。"
                        + "深度学习作为机器学习的分支,在图像识别和语音处理中表现出色。");

        // Find groups of similar texts
        Map<String, Set<String>> similarGroups = findSimilarTexts(texts);

        // Print the results (truncate previews so short texts don't overflow)
        similarGroups.forEach((key, group) -> {
            System.out.println("\nSimilar text group (" + group.size() + " items):");
            group.forEach(text -> System.out.println(
                    "[" + text.substring(0, Math.min(20, text.length())) + "...]"));
        });
    }

    /**
     * Find groups of similar texts.
     */
    public static Map<String, Set<String>> findSimilarTexts(List<String> texts) {
        // Compute SimHash fingerprints in parallel
        Map<String, Long> simHashes = new ConcurrentHashMap<>();
        IntStream.range(0, texts.size()).parallel().forEach(i -> {
            String text = texts.get(i);
            simHashes.put(text, computeSimHash(text));
        });

        // Bloom filter for skipping pairs that were already compared
        BloomFilter<Long> bloomFilter = BloomFilter.create(
                Funnels.longFunnel(), BLOOM_EXPECTED_SIZE, BLOOM_FPP);

        // Group similar texts
        Map<String, Set<String>> result = new HashMap<>();
        for (String text1 : texts) {
            if (!result.containsKey(text1)) {
                Set<String> group = new HashSet<>();
                group.add(text1);
                for (String text2 : texts) {
                    if (!text1.equals(text2)) {
                        long hash1 = simHashes.get(text1);
                        long hash2 = simHashes.get(text2);
                        // hash1 ^ hash2 is symmetric, so (a, b) and (b, a)
                        // map to the same key: already-seen pairs are skipped
                        if (bloomFilter.mightContain(hash1 ^ hash2)) {
                            continue;
                        }
                        bloomFilter.put(hash1 ^ hash2);
                        // Dynamic similarity computation
                        double similarity = calculateSimilarity(text1, text2, hash1, hash2);
                        if (similarity >= BASE_SIMILARITY) {
                            group.add(text2);
                            result.put(text2, group); // mark text2 as processed
                        }
                    }
                }
                if (group.size() > 1) {
                    result.put(text1, group);
                }
            }
        }
        return result;
    }

    /**
     * Compute the 64-bit SimHash fingerprint of a text.
     */
    private static long computeSimHash(String text) {
        // 1. Chinese segmentation with part-of-speech filtering
        List<Term> terms = HanLP.segment(text);
        Map<String, Double> wordWeights = new HashMap<>();

        // 2. Word weights (term frequency; a simplified stand-in for TF-IDF)
        terms.stream()
                .filter(term -> term.nature.toString().startsWith("n")) // keep nouns
                .forEach(term -> wordWeights.merge(term.word, 1.0, Double::sum));

        // 3. Build the feature vector
        double[] featureVector = new double[SIMHASH_BITS];
        wordWeights.forEach((word, weight) -> {
            long wordHash = Murmur3.hash64(word.getBytes(StandardCharsets.UTF_8));
            for (int i = 0; i < SIMHASH_BITS; i++) {
                if ((wordHash & (1L << i)) != 0) {
                    featureVector[i] += weight;
                } else {
                    featureVector[i] -= weight;
                }
            }
        });

        // 4. Collapse the vector into the fingerprint
        long simHash = 0;
        for (int i = 0; i < SIMHASH_BITS; i++) {
            if (featureVector[i] > 0) {
                simHash |= 1L << i;
            }
        }
        return simHash;
    }

    /**
     * Dynamic similarity computation: the strategy adapts to text length.
     */
    private static double calculateSimilarity(String text1, String text2, long hash1, long hash2) {
        // 1. Hamming distance between the fingerprints
        int hammingDistance = Long.bitCount(hash1 ^ hash2);
        double baseSimilarity = 1.0 - (hammingDistance / (double) SIMHASH_BITS);

        // 2. Dynamic adjustment strategy
        int minLength = Math.min(text1.length(), text2.length());
        if (minLength < MIN_TEXT_LENGTH) {
            // Short texts: reinforce with Jaccard similarity
            double jaccard = calculateJaccardSimilarity(text1, text2);
            return (baseSimilarity * 0.6) + (jaccard * 0.4);
        } else {
            // Long texts: weighted combination with cosine similarity
            double lengthFactor = Math.min(1.0, minLength / 1000.0);
            double weightedSimilarity = baseSimilarity * 0.7
                    + calculateCosineSimilarity(text1, text2) * 0.3;
            // Length compensation factor
            return weightedSimilarity * (0.9 + 0.1 * lengthFactor);
        }
    }

    /**
     * Jaccard similarity over content words (suited to short texts).
     * A character n-gram variant would also work here and avoids
     * segmentation entirely for very short inputs.
     */
    private static double calculateJaccardSimilarity(String text1, String text2) {
        Set<String> set1 = contentWords(text1);
        Set<String> set2 = contentWords(text2);

        Set<String> intersection = new HashSet<>(set1);
        intersection.retainAll(set2);

        Set<String> union = new HashSet<>(set1);
        union.addAll(set2);

        return union.isEmpty() ? 0 : (double) intersection.size() / union.size();
    }

    /** Segment a text and keep its nouns and verbs as a content-word set. */
    private static Set<String> contentWords(String text) {
        return HanLP.segment(text).stream()
                .filter(term -> term.nature.toString().startsWith("n")
                        || term.nature.toString().startsWith("v"))
                .map(term -> term.word)
                .collect(Collectors.toSet());
    }

    /**
     * Cosine similarity over term-frequency vectors (suited to long texts).
     */
    private static double calculateCosineSimilarity(String text1, String text2) {
        Map<String, Integer> vector1 = buildTermVector(text1);
        Map<String, Integer> vector2 = buildTermVector(text2);

        Set<String> vocabulary = new HashSet<>();
        vocabulary.addAll(vector1.keySet());
        vocabulary.addAll(vector2.keySet());

        double dotProduct = 0;
        double norm1 = 0;
        double norm2 = 0;
        for (String term : vocabulary) {
            int count1 = vector1.getOrDefault(term, 0);
            int count2 = vector2.getOrDefault(term, 0);
            dotProduct += count1 * count2;
            norm1 += count1 * count1;
            norm2 += count2 * count2;
        }
        return norm1 == 0 || norm2 == 0
                ? 0
                : dotProduct / (Math.sqrt(norm1) * Math.sqrt(norm2));
    }

    /**
     * Build a term-frequency vector over nouns.
     */
    private static Map<String, Integer> buildTermVector(String text) {
        List<Term> terms = HanLP.segment(text);
        Map<String, Integer> vector = new HashMap<>();
        terms.stream()
                .filter(term -> term.nature.toString().startsWith("n")) // nouns
                .map(term -> term.word)
                .forEach(word -> vector.merge(word, 1, Integer::sum));
        return vector;
    }

    /**
     * Simplified 64-bit Murmur3-style hash (single 64-bit state; not the
     * reference MurmurHash3, but stable and well-mixed, which is all
     * SimHash needs).
     */
    static class Murmur3 {
        private static final long C1 = 0x87c37b91114253d5L;
        private static final long C2 = 0x4cf5ad432745937fL;
        private static final int R1 = 31;
        private static final int R2 = 27;
        private static final int M = 5;
        private static final int N1 = 0x52dce729;

        static long hash64(byte[] data) {
            int length = data.length;
            long hash = 0;
            int i = 0;

            // Body: process 16-byte (128-bit) blocks
            for (; i + 16 <= length; i += 16) {
                long k1 = getLong(data, i);
                long k2 = getLong(data, i + 8);
                k1 *= C1; k1 = Long.rotateLeft(k1, R1); k1 *= C2;
                hash ^= k1; hash = Long.rotateLeft(hash, R2); hash = hash * M + N1;
                k2 *= C2; k2 = Long.rotateLeft(k2, R2); k2 *= C1;
                hash ^= k2; hash = Long.rotateLeft(hash, R2); hash = hash * M + N1;
            }

            // Tail: fold remaining bytes into one word (Java shifts wrap past
            // 63, so 9-15-byte tails overlap; adequate for SimHash use)
            long k = 0;
            for (int shift = 0; i < length; i++, shift += 8) {
                k ^= ((long) data[i] & 0xFF) << shift;
            }
            k *= C1; k = Long.rotateLeft(k, R1); k *= C2;
            hash ^= k;

            // Final mixing
            hash ^= length;
            hash = fmix64(hash);
            return hash;
        }

        private static long getLong(byte[] b, int i) {
            return ((b[i] & 0xFFL)) |
                    ((b[i + 1] & 0xFFL) << 8) |
                    ((b[i + 2] & 0xFFL) << 16) |
                    ((b[i + 3] & 0xFFL) << 24) |
                    ((b[i + 4] & 0xFFL) << 32) |
                    ((b[i + 5] & 0xFFL) << 40) |
                    ((b[i + 6] & 0xFFL) << 48) |
                    ((b[i + 7] & 0xFFL) << 56);
        }

        private static long fmix64(long h) {
            h ^= h >>> 33;
            h *= 0xff51afd7ed558ccdL;
            h ^= h >>> 33;
            h *= 0xc4ceb9fe1a85ec53L;
            h ^= h >>> 33;
            return h;
        }
    }
}
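
Because BASE_SIMILARITY drives the final decision, it helps to know what a given threshold means in fingerprint terms. For the Hamming component alone (short texts blend in Jaccard, long texts cosine, so this is only the first-pass view), a threshold t over 64 bits tolerates at most floor(64 * (1 - t)) differing bits. A small sketch of that arithmetic:

public class ThresholdTable {
    public static void main(String[] args) {
        // For a 64-bit SimHash, similarity = 1 - distance / 64, so a
        // threshold t admits at most floor(64 * (1 - t)) differing bits.
        for (double t : new double[]{0.6, 0.7, 0.8, 0.9}) {
            int maxDistance = (int) Math.floor(64 * (1.0 - t));
            System.out.println("threshold " + t + " -> up to " + maxDistance + " differing bits");
        }
    }
}

The default of 0.7 thus tolerates up to 19 differing bits; raising the threshold toward 0.9 tightens that to 6 and yields smaller, stricter groups.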
