
SSH honeypot: Cowrie

Cowrie is an SSH service honeypot.

Cowrie documentation

docker pull cowrie/cowrie

Configure docker-compose.yml

```yaml
version: '3'

services:
  cowrie:
    restart: always
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "22:2222"
      - "23:2223"
    image: "cowrie/cowrie"
    volumes:
      - /data/cowrie/cowrie-git/etc:/cowrie/cowrie-git/etc
      - /data/cowrie/cowrie-git/var:/cowrie/cowrie-git/var
```
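The two volumes assume that /data/cowrie/cowrie-git/etc and .../var already exist on the host and contain Cowrie's default files; otherwise the container sees empty directories. A minimal sketch of seeding them from the image before the first start (the in-container path is taken from the volume mapping above and should be verified against the image you use):

```sh
# One-time seeding of the host directories used by the bind mounts above
mkdir -p /data/cowrie/cowrie-git/etc /data/cowrie/cowrie-git/var
cid=$(docker create cowrie/cowrie)          # temporary container, never started
docker cp "$cid":/cowrie/cowrie-git/etc/. /data/cowrie/cowrie-git/etc/
docker rm "$cid"
```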

The key part is the cowrie.cfg configuration file.


Annotated cowrie.cfg configuration:

# ============================================================================
# General options
# ============================================================================
[honeypot]


# Sensor (instance) name
sensor_name=myhostname

# Fake hostname displayed by the emulated shell
hostname = svr04


# Directory where logs are stored
log_path = var/log/cowrie


# Directory where downloaded files are stored
download_path = ${honeypot:state_path}/downloads


# Location of static files
share_path = share/cowrie


# Location of dynamic (state) files
state_path = var/lib/cowrie


# Location of configuration files
etc_path = etc


# This is used only by commands such as 'cat' to display file contents.
# Adding files here is not enough to make them appear in the honeypot; the actual virtual filesystem is kept in the filesystem pickle file.
# Location of virtual file contents
contents_path = honeyfs


#
#
# A fake command must be placed under this directory with its full path, e.g.
# txtcmds/usr/bin/vi
# When it is run inside the honeypot, the file's contents become the command's output.
# The file must also exist in the virtual filesystem.
#
# Location of fake (text-only) commands
txtcmds_path = txtcmds
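A quick sketch of adding such a fake command (the path /bin/dmesg is an assumption; it must exist both under txtcmds/ and in the pickle filesystem for the command to resolve):

```sh
# Capture real dmesg output and serve it as a canned command in the honeypot
mkdir -p txtcmds/bin
dmesg > txtcmds/bin/dmesg
```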


#
# Limit the maximum size of downloaded files
#download_limit_size = 10485760

# Record complete terminal interaction logs in UML-compatible format
ttylog = true

# Default location for ttylog files
ttylog_path = ${honeypot:state_path}/tty

# Interactive session timeout, in seconds
interactive_timeout = 180

# Timeout for authentication, in seconds
authentication_timeout = 120

# Backend to use; options: proxy or shell
# (proxy forwards sessions to a real environment and only supports request_exec; shell uses the emulated shell)
backend = shell

# Timezone
timezone = UTC


# ============================================================================
# Network options
# ============================================================================


# Source IP address used when the honeypot makes outbound connections
#
# (default: not specified)
#out_addr = 0.0.0.0


# Fake address to display for hosts that have logged in before
# This does not affect logging; it only changes the output of commands such as w and last
#
# If not specified, the actual IP address is displayed instead (default
# behaviour).
#
# (default: not specified)
#fake_addr = 192.168.66.254


# The honeypot's Internet-facing IP address (if empty, it is determined automatically)
#
#internet_facing_ip = 9.9.9.9


# Report the honeypot's public IP address in the logs (useful when listening on 127.0.0.1)
# IP address is obtained by querying http://myip.threatstream.com
#report_public_ip = true



# ============================================================================
# Authentication Specific Options
# ============================================================================


# Class that implements the checklogin() method
#
# It must be defined in cowrie/core/auth.py
# The default is UserDB (a password database)
# AuthRandom can be used instead (a user can log in after a random number of attempts)
auth_class = UserDB

# Parameters for AuthRandom:
# auth_class_parameters: <min-try>, <max-try>, <maxcache>
# e.g. 2, 5, 10 allows login after 2 to 5 attempts, caching 10 combinations
#auth_class = AuthRandom
#auth_class_parameters = 2, 5, 10
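With the default UserDB class, accepted credentials are read from a userdb file under the etc directory (etc/userdb.txt here, modeled on the userdb.example shipped with Cowrie; treat the path and syntax as assumptions to check against your version):

```
# username:x:password ('*' matches any password, a leading '!' denies it)
root:x:!root
root:x:!123456
root:x:*
admin:x:*
```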


# No authentication
# With auth_none enabled, users can log in without any authentication
#auth_none_enabled = false

# Enable keyboard-interactive authentication prompts
auth_keyboard_interactive_enabled = false


# ============================================================================
# Historical SSH-specific options
# ============================================================================

# Source port to report in the logs (useful when ports are forwarded to Cowrie)
#reported_ssh_port = 22



# ============================================================================
# Shell options
# ============================================================================

[shell]

# Python pickle file containing the virtual filesystem
# It holds the file names, paths and permissions of the Cowrie filesystem, but not the file contents. It is created with the bin/createfs utility from a real template Linux installation.
filesystem = ${honeypot:share_path}/fs.pickle
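To mimic a different machine, the pickle can be regenerated with the bin/createfs utility mentioned above; a rough sketch (the flags are assumptions, check bin/createfs --help for your Cowrie version):

```sh
# Sketch only: snapshot the local filesystem layout into a new pickle,
# then point the "filesystem" option above at it
bin/createfs -l / -o share/cowrie/custom-fs.pickle
```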


# Output of the ps command
#
# (default: share/cowrie/cmdoutput.json)
processes = share/cowrie/cmdoutput.json


# Architecture of the emulated operating system

arch = linux-x64-lsb



# Values returned by uname -a
kernel_version = 3.2.0-4-amd64
kernel_build_string = #1 SMP Debian 3.2.68-1+deb7u1
hardware_platform = x86_64
operating_system = GNU/Linux

# Value returned by ssh -V
ssh_version = OpenSSH_7.9p1, OpenSSL 1.1.1a 20 Nov 2018


# ============================================================================
# SSH-specific options
# ============================================================================
[ssh]

# Enable SSH
enabled = true


# Host key pairs (public and private)
rsa_public_key = ${honeypot:state_path}/ssh_host_rsa_key.pub
rsa_private_key = ${honeypot:state_path}/ssh_host_rsa_key
dsa_public_key = ${honeypot:state_path}/ssh_host_dsa_key.pub
dsa_private_key = ${honeypot:state_path}/ssh_host_dsa_key


# SSH version string returned to clients
version = SSH-2.0-OpenSSH_6.0p1 Debian-4+deb7u2

# Cipher algorithms
ciphers = aes128-ctr,aes192-ctr,aes256-ctr,aes256-cbc,aes192-cbc,aes128-cbc,3des-cbc,blowfish-cbc,cast128-cbc


# MAC algorithms
macs = hmac-sha2-512,hmac-sha2-384,hmac-sha2-256,hmac-sha1,hmac-md5


# Compression methods
compression = zlib@openssh.com,zlib,none


# IP addresses to listen on
# The default 0.0.0.0 accepts connections on any IPv4 address
#listen_addr = 0.0.0.0
# (use :: for listen to all IPv6 and IPv4 addresses)
#listen_addr = ::


# Port to listen on
#listen_port = 2222


# Enable SFTP support
sftp_enabled = true


# Enable SSH port forwarding
forwarding = true


# Redirect forwarding requests to another address (e.g. another honeypot)
forward_redirect = false


# Configure where to redirect the traffic to

# Redirect http/https
# forward_redirect_80 = 127.0.0.1:8000
# forward_redirect_443 = 127.0.0.1:8443

# To record SMTP traffic, install an SMTP honeypot
# (e.g. https://github.com/awhitehatter/mailoney), run
# python mailoney.py -s yahoo.com -t schizo_open_relay -p 12525
# forward_redirect_25 = 127.0.0.1:12525
# forward_redirect_587 = 127.0.0.1:12525


# This enables tunneling forwarding requests to another address
# Useful for forwarding protocols to a proxy like Squid
# (default: false)
forward_tunnel = false


# Configure where to tunnel the data to.
# forward_tunnel_<port> = <host>:<port>

# Tunnel http/https
# forward_tunnel_80 = 127.0.0.1:3128
# forward_tunnel_443 = 127.0.0.1:3128


# ============================================================================
# Telnet options
# ============================================================================
[telnet]

# Enable Telnet
enabled = false

# IP addresses to listen on
#
# (default: 0.0.0.0) = any IPv4 address
#listen_addr = 0.0.0.0
# (use :: for listen to all IPv6 and IPv4 addresses)
#listen_addr = ::


# Port to listen on
#
# (default: 2223)
#listen_port = 2223


# Endpoint to listen on for incoming Telnet connections.
# See https://twistedmatrix.com/documents/current/core/howto/endpoints.html#servers
# (default: listen_endpoints = tcp:2223:interface=0.0.0.0)
# (use systemd: endpoint for systemd activation)
# listen_endpoints = systemd:domain=INET:index=0
# For IPv4 and IPv6: listen_endpoints = tcp6:2223:interface=\:\: tcp:2223:interface=0.0.0.0
# Listening on multiple endpoints is supported with a single space separator
# e.g. "listen_endpoints = tcp:2223:interface=0.0.0.0 tcp:2323:interface=0.0.0.0" will listen on both port 2223 and port 2323
# use authbind for port numbers under 1024

listen_endpoints = tcp:2223:interface=0.0.0.0


# Source Port to report in logs (useful if you use iptables to forward ports to Cowrie)
#reported_port = 23



# ============================================================================
# Database logging Specific Options
# ============================================================================

# XMPP Logging
# Log to an xmpp server.
#
#[database_xmpp]
#server = sensors.carnivore.it
#user = anonymous@sensors.carnivore.it
#password = anonymous
#muc = dionaea.sensors.carnivore.it
#signal_createsession = cowrie-events
#signal_connectionlost = cowrie-events
#signal_loginfailed = cowrie-events
#signal_loginsucceeded = cowrie-events
#signal_command = cowrie-events
#signal_clientversion = cowrie-events
#debug=true




# ============================================================================
# Output Plugins
# These provide an extensible mechanism to send audit log entries to third
# parties. The audit entries contain information on clients connecting to
# the honeypot.
#
# Output entries need to start with 'output_' and have the 'enabled' entry.
# ============================================================================

#[output_xmpp]
#enabled=true
#server = conference.cowrie.local
#user = cowrie@cowrie.local
#password = cowrie
#muc = hacker_room

# JSON based logging module
#
[output_jsonlog]
enabled = true
logfile = ${honeypot:log_path}/cowrie.json
epoch_timestamp = false
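With jsonlog enabled, every event (login attempt, shell command, file download) is one JSON object per line, which makes ad-hoc analysis easy; a small sketch using jq (assumed to be installed):

```sh
# Follow the commands attackers type, one tab-separated line per event
tail -f var/log/cowrie/cowrie.json | \
  jq -r 'select(.eventid == "cowrie.command.input") | [.timestamp, .src_ip, .input] | @tsv'
```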

# Supports logging to Elasticsearch
# This is a simple early release
#
#[output_elasticsearch]
#enabled = false
#host = localhost
#port = 9200
#index = cowrie
#type = cowrie
#pipeline = geoip


# Send login attempt information to SANS DShield
# See https://isc.sans.edu/ssh.html
# You must signup for an api key.
# Once registered, find your details at: https://isc.sans.edu/myaccount.html
#
#[output_dshield]
#userid = userid_here
#auth_key = auth_key_here
#batch_size = 100
#enabled = false


# Local Syslog output module
#
# This sends log messages to the local syslog daemon.
# Facility can be:
# KERN, USER, MAIL, DAEMON, AUTH, LPR, NEWS, UUCP, CRON, SYSLOG and LOCAL0 to LOCAL7.
#
# Format can be:
# text, cef
#
#[output_localsyslog]
#enabled = false
#facility = USER
#format = text


# Text output
# This writes audit log entries to a text file
#
# Format can be:
# text, cef
#
#[output_textlog]
#enabled = false
#logfile = ${honeypot:log_path}/audit.log
#format = text


# MySQL logging module
# Database structure for this module is supplied in docs/sql/mysql.sql
#
# MySQL logging requires extra software: sudo apt-get install libmysqlclient-dev
# MySQL logging requires an extra Python module: pip install mysql-python
#
#[output_mysql]
#enabled = false
#host = localhost
#database = cowrie
#username = cowrie
#password = secret
#port = 3306
#debug = false

# Rethinkdb output module
# Rethinkdb output module requires extra Python module: pip install rethinkdb

#[output_rethinkdblog]
#enabled = false
#host = 127.0.0.1
#port = 28015
#table = output
#password =
#db = cowrie

# SQLite3 logging module
#
# Logging to SQLite3 database. To init the database, use the script
# docs/sql/sqlite3.sql:
# sqlite3 <db_file> < docs/sql/sqlite3.sql
#
#[output_sqlite]
#enabled = false
#db_file = cowrie.db

# MongoDB logging module
#
# MongoDB logging requires an extra Python module: pip install pymongo
#
#[output_mongodb]
#enabled = false
#connection_string = mongodb://username:password@host:port/database
#database = dbname


# Splunk HTTP Event Collector (HEC) output module
# sends JSON directly to Splunk over HTTP or HTTPS
# Use 'https' if your HEC is encrypted, else 'http'
# mandatory fields: url, token
# optional fields: index, source, sourcetype, host
#
#[output_splunk]
#enabled = false
#url = https://localhost:8088/services/collector/event
#token = 6A0EA6C6-8006-4E39-FC44-C35FF6E561A8
#index = cowrie
#sourcetype = cowrie
#source = cowrie


# HPFeeds
#
#[output_hpfeeds]
#enabled = false
#server = hpfeeds.mysite.org
#port = 10000
#identifier = abc123
#secret = secret
#debug=false


# VirusTotal output module
# You must signup for an api key.
#
#[output_virustotal]
#enabled = false
#api_key = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
#upload = True
#debug = False
#scan_file = True
#scan_url = False


# Cuckoo output module
#[output_cuckoo]
#enabled = false
# no slash at the end
#url_base = http://127.0.0.1:8090
#user = user
#passwd = passwd
# force will upload duplicated files to cuckoo
#force = 0

# upload to MalShare
#[output_malshare]
#enabled = false

# This will produce a lot of messages - you have been warned...
#[output_slack]
#enabled = false
#channel = channel_that_events_should_be_posted_in
#token = slack_token_for_your_bot
#debug = false


# https://csirtg.io
# You must signup for an api key.
#
#[output_csirtg]
#enabled = false
#username = wes
#feed = scanners
#description = random scanning activity
#token = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef


#[output_socketlog]
#enabled = false
#address = 127.0.0.1:9000
#timeout = 5

# Upload files that cowrie has captured to an S3 (or compatible) bucket
# Files are stored with a name that is the SHA of their contents
#
#[output_s3]
#
# The AWS credentials to use.
# Leave these blank to use botocore's credential discovery, e.g. .aws/config or environment variables.
# As per https://github.com/boto/botocore/blob/develop/botocore/credentials.py#L50-L65
#access_key_id = AKIDEXAMPLE
#secret_access_key = wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY
#
# The bucket to store the files in. The bucket must already exist.
#bucket = my-cowrie-bucket
#
# The region the bucket is in
#region = eu-west-1
#
# An alternate endpoint URL. If you self host a pithos instance you can set
# this to its URL (e.g. https://s3.mydomain.com) - can otherwise be blank
#endpoint =
#
# Whether or not to validate the S3 certificate. Set this to 'no' to turn this
# off. Do not do this for real AWS. It's only needed for a self-hosted S3 clone
# where you don't yet have real certificates.
#verify = no

#[output_influx]
#enabled = false
#host = 127.0.0.1
#port = 8086
#database_name = cowrie
#retention_policy_duration = 12w

[output_kafka]
enabled = false
host = 127.0.0.1
port = 9092
topic = cowrie


#[output_redis]
#enabled = false
#host = 127.0.0.1
#port = 6379
# DB of the redis server. Defaults to 0
#db = 0
# Password of the redis server. Defaults to None
#password = secret
# Name of the list to push to or the channel to publish to. Required
#keyname = cowrie
# Method to use when sending data to redis.
# Can be one of [lpush, rpush, publish]. Defaults to lpush
#send_method = lpush


# Perform Reverse DNS lookup
#[output_reversedns]
#enabled = true
# Timeout in seconds
#timeout = 3

#[output_greynoise]
#enabled = true
#debug=False
# Comma-separated list of tags to check the IP against,
# e.g. "SHODAN,JBOSS_WORM,CPANEL_SCANNER_LOW"
# If no specific tag is needed, just leave it as "all"
#tags = all
# An API key is optional, so if you do not want to buy
# an API key, leave this option commented out
#api_key = 1234567890

The actual configuration used, edited with vi /data/cowrie/cowrie-git/etc/cowrie.cfg:

```
[honeypot]
hostname = ubuntu
log_path = log
download_path = dl
report_public_ip = true
share_path= share/cowrie
state_path = /tmp/cowrie/data
etc_path = etc
contents_path = honeyfs
txtcmds_path = txtcmds
ttylog = true
ttylog_path = log/tty
interactive_timeout = 180
authentication_timeout = 120
backend = shell
auth_class = AuthRandom
auth_class_parameters = 2, 5, 10
reported_ssh_port = 2222
data_path = /tmp/cowrie/data

[shell]
filesystem = share/cowrie/fs.pickle
processes = share/cowrie/cmdoutput.json
arch = linux-x64-lsb
kernel_version = 3.2.0-4-amd64
kernel_build_string = #1 SMP Debian 3.2.68-1+deb7u1
hardware_platform = x86_64
operating_system = GNU/Linux

[ssh]
enabled = true
rsa_public_key = etc/ssh_host_rsa_key.pub
rsa_private_key = etc/ssh_host_rsa_key
dsa_public_key = etc/ssh_host_dsa_key.pub
dsa_private_key = etc/ssh_host_dsa_key
version = SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.2
listen_endpoints = tcp:2222:interface=0.0.0.0
sftp_enabled = true
forwarding = true
forward_redirect = false
forward_tunnel = false

[telnet]
enabled = true
listen_endpoints = tcp:2223:interface=0.0.0.0
reported_port = 2223

[output_jsonlog]
enabled = true
logfile = /cowrie/cowrie-git/var/log/cowrie/cowrie.json
epoch_timestamp = false
```

Generate the key pairs, copy them into the etc directory, and rename them so the names match cowrie.cfg:

ssh-keygen -t dsa
ssh-keygen -t rsa
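A non-interactive way to do the same thing; the file names match the [ssh] section of cowrie.cfg and the target directory is the host path bind-mounted into the container:

```sh
# Generate the host keys directly under the mounted etc/ directory,
# using the names cowrie.cfg expects
cd /data/cowrie/cowrie-git/etc
ssh-keygen -t rsa -f ssh_host_rsa_key -N ""
ssh-keygen -t dsa -f ssh_host_dsa_key -N ""
```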

docker-compose up
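Once the container is up, a quick check from another machine (replace <honeypot-ip> with the host's address; the JSON log path follows from the volume mapping above):

```sh
# Log in as an "attacker", then watch the events arrive
ssh root@<honeypot-ip>
tail -f /data/cowrie/cowrie-git/var/log/cowrie/cowrie.json
```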