So I am trying to do an Instagram-like view, which should be fairly simple. To start, just a UITableViewController
, and a UITableViewCell
that only contains a label and an image:
/// Displays a simple Instagram-style feed: a single section of rows,
/// each showing a title and a photo.
class FoodListVC: UITableViewController {
    /// Row titles; kept parallel to `images` (same count and order).
    let samples = [
        "things",
        "stuff",
        "foo"
    ]
    /// `UIImage(named:)` returns `nil` when an asset is missing, hence the optionals.
    let images = [
        UIImage(named: "photo1"),
        UIImage(named: "photo2"),
        UIImage(named: "photo3")
    ]

    override func viewDidLoad() {
        super.viewDidLoad()
        // Self-sizing rows: the cell's Auto Layout constraints drive the height;
        // the estimate just helps the table compute its initial content size.
        tableView.rowHeight = UITableViewAutomaticDimension
        tableView.estimatedRowHeight = 88
    }

    // MARK: - Table view data source

    override func numberOfSections(in tableView: UITableView) -> Int {
        return 1
    }

    override func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int {
        // Guard against the parallel arrays drifting out of sync: only expose
        // rows for which both a title and an image slot exist.
        return min(samples.count, images.count)
    }

    override func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell {
        let cell = tableView.dequeueReusableCell(withIdentifier: "foodCell", for: indexPath)
        // Avoid the `as!` / `!` crashes of the original: if the storyboard is
        // misconfigured or the asset is missing, fall back to a plain cell.
        guard let foodCell = cell as? FoodCell,
              let image = images[indexPath.row] else {
            return cell
        }
        foodCell.setContent(title: samples[indexPath.row], image: image)
        return foodCell
    }
}
and:
/// Table view cell showing a title label and a photo, laid out in the storyboard.
class FoodCell: UITableViewCell {

    // MARK: - Outlets

    @IBOutlet weak var titleLabel: UILabel!
    @IBOutlet weak var mainImage: UIImageView!

    // MARK: - Lifecycle

    override func awakeFromNib() {
        super.awakeFromNib()
        // All configuration comes from the storyboard; nothing extra to set up.
    }

    override func setSelected(_ selected: Bool, animated: Bool) {
        super.setSelected(selected, animated: animated)
        // Selection currently has no custom appearance.
    }

    // MARK: - Configuration

    /// Populates the cell with one row's title and photo.
    func setContent(title: String, image: UIImage) {
        titleLabel.text = title
        mainImage.image = image
        // Red background makes the image view's actual bounds visible while debugging layout.
        mainImage.backgroundColor = .red
    }
}
In the interface layout, things look straightforward as well:
However, once I load the app, the margins are just huge:
My theory is that the images are much larger than the space available to draw them on the phone, and with "Aspect Fit", the image view just adds transparent padding above and below the image.
How can I work around this?
You appear to be letting the size of the image view be dictated by the intrinsic size of the image view. But the intrinsic size of the image view is dictated by the size of the image, regardless of the content mode of the image view.
Rather than relying on the intrinsic size of the image view, you can define a constraint for the size of the image view, e.g.:
/// Table view cell whose image view's height is driven by the image's own
/// aspect ratio via a programmatic constraint.
class FoodCell: UITableViewCell {
    @IBOutlet weak var titleLabel: UILabel!
    @IBOutlet weak var mainImage: UIImageView!

    /// Width = height × aspect-ratio constraint for the current image.
    /// Rebuilt on every `setContent(title:image:)` because reused cells can
    /// receive images with different ratios.
    private var aspectConstraint: NSLayoutConstraint?

    /// Populates the cell and pins the image view to the image's aspect ratio.
    func setContent(title: String, image: UIImage) {
        // Remove the previous image's constraint, if any (no force unwrap needed).
        if let existing = aspectConstraint {
            mainImage.removeConstraint(existing)
        }

        // Tie the image view's width to its height using the image's own ratio.
        let ratio = image.size.width / image.size.height
        let constraint = NSLayoutConstraint(
            item: mainImage,
            attribute: .width,
            relatedBy: .equal,
            toItem: mainImage,
            attribute: .height,
            multiplier: ratio,
            constant: 0)
        // Slightly below required (1000) so transient layout passes during cell
        // sizing don't emit "unsatisfiable constraints" warnings.
        // `UILayoutPriority(999)` compiles in both Swift 3 (Float typealias)
        // and Swift 4+ (struct), unlike a bare numeric literal.
        constraint.priority = UILayoutPriority(999)
        mainImage.addConstraint(constraint)
        aspectConstraint = constraint

        // Set the image and label.
        titleLabel.text = title
        mainImage.image = image
        mainImage.backgroundColor = .red
    }
}
Note that I set this to a high priority constraint, but less than 1000. I do this for two reasons:
First, cells will generate all sorts of auto layout warnings about the table view cell's intrinsic height if you perform just-in-time adjustments to the constraints when returning from cellForRowAt
(even if the constraints are actually fully satisfiable).
Second, when you have cells whose height can change based upon external inputs (e.g. the size of the image), you often want to constrain how tall the image view in the cell can be, IMHO. If you let images in cells grow to absurd heights (what if the image was a vertical photo of a carrot that was 200 px wide and 2000 px tall), you can end up with weird UX where the image view is so tall that you lose the whole "I'm scrolling in a table view" vibe.
So I like to constrain the max height of the image view, regardless of the image. So, in IB, I define a constraint that says that the height of the image view should be <=
to 200 points (use whatever value you want). And I then use a content mode for the image of "scale aspect fit".
Note, in addition to the above, another approach is to leave your constraints as they are, but resize the image itself so that, if it's really large, it is scaled down to a size appropriate for the width of the image view. For example, you can do something like:
/// Populates the cell, downscaling oversized images to roughly the image
/// view's current width before assigning them (saves memory on huge photos).
func setContent(title: String, image: UIImage) {
titleLabel.text = title
// Only resize when the image is wider than the image view; smaller images are used as-is.
if image.size.width > mainImage.frame.width {
// Target box: image-view width × aspect-correct height.
// NOTE(review): `max(200, …)` makes the height at least 200; if the intent
// is to cap the height at 200, this should presumably be `min` — confirm.
// NOTE(review): `mainImage.frame.width` is only meaningful after layout;
// inside `cellForRowAt` it may still be the storyboard width — verify.
let size = CGSize(width: mainImage.frame.width, height: max(200, mainImage.frame.width * image.size.height / image.size.width))
mainImage.image = image.scaledAspectFit(to: size)
} else {
mainImage.image = image
}
// Red background reveals the image view's bounds for debugging.
mainImage.backgroundColor = .red
}
Where:
extension UIImage {
    /// Resize the image to the required size, scaling it according to `contentMode`.
    ///
    /// - parameter newSize:     The new size of the image.
    /// - parameter contentMode: The `UIViewContentMode` to be applied when resizing image.
    ///                          Either `.scaleToFill`, `.scaleAspectFill`, or `.scaleAspectFit`.
    ///
    /// - returns: The resized `UIImage`, or `nil` for unsupported content modes
    ///            or degenerate (zero-dimension) sizes.
    func scaled(to newSize: CGSize, contentMode: UIViewContentMode = .scaleToFill) -> UIImage? {
        // A zero-dimension target would divide by zero below.
        guard newSize.width > 0, newSize.height > 0 else { return nil }

        if contentMode == .scaleToFill {
            return filled(to: newSize)
        } else if contentMode == .scaleAspectFill || contentMode == .scaleAspectFit {
            let horizontalRatio = size.width / newSize.width
            let verticalRatio = size.height / newSize.height
            // Fill keeps the smaller ratio (image covers the box; overflow is cropped);
            // fit keeps the larger ratio (image fits entirely inside the box).
            let ratio = contentMode == .scaleAspectFill
                ? min(horizontalRatio, verticalRatio)
                : max(horizontalRatio, verticalRatio)
            let sizeForAspectScale = CGSize(width: size.width / ratio, height: size.height / ratio)
            let image = filled(to: sizeForAspectScale)
            if contentMode == .scaleAspectFill {
                // Trim the overflow so the result is exactly `newSize`, centered.
                let subRect = CGRect(
                    x: floor((sizeForAspectScale.width - newSize.width) / 2.0),
                    y: floor((sizeForAspectScale.height - newSize.height) / 2.0),
                    width: newSize.width,
                    height: newSize.height)
                return image?.cropped(to: subRect)
            }
            return image
        }
        // Other content modes (.center, .top, …) are positioning, not scaling, modes.
        return nil
    }

    /// Resize the image to be the required size, stretching it as needed
    /// (the aspect ratio is NOT preserved).
    ///
    /// - parameter newSize: The new size of the image.
    ///
    /// - returns: Resized `UIImage`, or `nil` if the context yielded no image.
    func filled(to newSize: CGSize) -> UIImage? {
        // Render at the receiver's scale so Retina images stay sharp.
        UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
        // `defer` guarantees the context is balanced on every exit path.
        defer { UIGraphicsEndImageContext() }
        draw(in: CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height))
        return UIGraphicsGetImageFromCurrentImageContext()
    }

    /// Crop the image to the required bounds (specified in points).
    ///
    /// - parameter bounds: The bounds to which the new image should be cropped.
    ///
    /// - returns: Cropped `UIImage`, or `nil` if cropping failed.
    func cropped(to bounds: CGRect) -> UIImage? {
        // `CGImage.cropping(to:)` works in pixel coordinates, so the entire
        // point-based rect — origin included — must be converted by `scale`.
        // (Scaling only the size, as before, offset the crop on Retina screens.)
        var rect = bounds
        rect.origin.x *= scale
        rect.origin.y *= scale
        rect.size.width *= scale
        rect.size.height *= scale
        guard let imageRef = cgImage?.cropping(to: rect) else { return nil }
        return UIImage(cgImage: imageRef, scale: scale, orientation: imageOrientation)
    }

    /// Resize the image to fill the rectangle of the specified size, preserving
    /// the aspect ratio, trimming if needed.
    ///
    /// - parameter newSize: The new size of the image.
    ///
    /// - returns: Return `UIImage` of resized image.
    func scaledAspectFill(to newSize: CGSize) -> UIImage? {
        return scaled(to: newSize, contentMode: .scaleAspectFill)
    }

    /// Resize the image to fit within the required size, preserving the aspect
    /// ratio, with no trimming taking place.
    ///
    /// - parameter newSize: The new size of the image.
    ///
    /// - returns: Return `UIImage` of resized image.
    func scaledAspectFit(to newSize: CGSize) -> UIImage? {
        return scaled(to: newSize, contentMode: .scaleAspectFit)
    }
}
This "resize the image" approach has another virtue. Large images in small image views still require a lot of memory. But if you resize the image to a size appropriate for the image view, you avoid wasting memory.